"""Benchmark a BERT-style ONNX model with the TensorRT/CUDA execution providers."""
import time

import numpy as np
import onnxruntime as ort

# The original script set three configuration flags here (values "1", "0", "1"),
# most likely via os.environ; the exact variable names were lost in the export
# and are deliberately not guessed.

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2_000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_000 / max_iters))
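

# --- Added sketch (not part of the original benchmark) -----------------------
# How real inputs could be built with a Hugging Face tokenizer instead of the
# all-ones dummies above. The checkpoint name "bert-base-uncased" is an
# assumption: use whichever model "model.onnx" was exported from, and note
# that `transformers` must be installed for this helper to work.
def run_with_real_inputs(text: str):
    from transformers import AutoTokenizer  # assumed available

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    encoded = tokenizer(
        text,
        padding="max_length",
        max_length=sequence,
        truncation=True,
        return_tensors="np",
    )
    # ONNX Runtime expects int64 feeds matching the exported input names.
    feeds = {name: encoded[name].astype(np.int64) for name in ("input_ids", "attention_mask", "token_type_ids")}
    return sess.run(None, feeds, run_options=run_opt)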
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCamelCase  # the verbatim expected-encoding fixture defined just above

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import requests
a_ = """""" # <-- Put your OpenWeatherMap appid here!
a_ = """https://api.openweathermap.org/data/2.5/"""
def __lowercase ( snake_case_ : str = "Chicago" ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' ,params=locals() ).json()
def __lowercase ( snake_case_ : str = "Kolkata, India" ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' ,params=locals() ).json()
def __lowercase ( snake_case_ : float = 55.68 ,snake_case_ : float = 12.57 ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
a_ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co./facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    """Configuration class for TimeSformer video models."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
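

# Added usage sketch. This module uses relative imports, so go through the
# public API; the hyperparameter values below are illustrative, not canonical:
#
#   from transformers import TimesformerConfig
#   config = TimesformerConfig(num_frames=16)
#   print(config.model_type, config.hidden_size, config.num_frames)  # timesformer 768 16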
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict ) -> List[Any]:
"""simple docstring"""
snake_case : Optional[Any] = MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'Building PyTorch model from configuration: {config}' )
snake_case : str = MobileBertForPreTraining(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
snake_case : Any = load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
"""Tokenization class for PEGASUS, based on SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co./google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    """PEGASUS tokenizer: ids 0/1 are <pad>/</s>, ids 2..offset-1 hold the mask
    and <unk_x> placeholder tokens, and SentencePiece ids are shifted up by
    `offset`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )

            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take a string as input and return a list of (sub-)word tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a list with 1 for each special token and 0 otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending EOS."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
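

# Added usage sketch (requires a checkpoint download; run from a context where
# `transformers` is importable, since this module uses relative imports):
#
#   from transformers import PegasusTokenizer
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("Pegasus shifts SentencePiece ids by its offset.").input_ids
#   # ids 0 and 1 are reserved for <pad> and </s>; every SentencePiece piece id
#   # is shifted up by tok.offset (103 by default).
#   assert tok.convert_ids_to_tokens([0, 1]) == ["<pad>", "</s>"]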
"""
Prime-number utilities: primality testing, a sieve of Eratosthenes, prime
factorization, gcd/least common multiple, Goldbach partitions, divisors,
perfect numbers, fraction simplification, factorial, and Fibonacci.
"""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to `n` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes between 2 and `n` (inclusive), using trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of `number` as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the greatest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if `number` is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's assumption: return two primes whose sum equals the even `number` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: return the greatest common divisor of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number (zero-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains not 'p_number_1' and 'p_number_2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of `n` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number, computed iteratively (fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
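

# Added demo (not in the original module): a few hand-verified spot checks of
# the helpers above.
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(96)
    assert get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert goldbach(28) == [5, 23]
    assert gcd(54, 24) == 6 and kg_v(4, 6) == 12
    assert fib(10) == 89
    print("primelib spot checks passed")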
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co./huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co./models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
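

# Added usage sketch (go through the public API, since this module uses
# relative imports; the hyperparameter values are illustrative):
#
#   from transformers import InformerConfig
#   config = InformerConfig(prediction_length=24, num_time_features=2)
#   # feature_size = input_size * len(lags_sequence) + embedding/static/time features
#   print(config.feature_size, config.attention_type)  # e.g. 11 prob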
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
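

# Added usage example (verified by hand): thanks to the 0-weight edge 0 -> 1,
# the path 0 -> 1 -> 2 ties the direct edge 0 -> 2 at total cost 1.
def _demo_zero_one_bfs() -> None:
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 2, 1)
    assert graph.get_shortest_path(0, 2) == 1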
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: 1 if the count of prime factors
    (with multiplicity) is even, -1 if it is odd."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
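

# Added spot check: 10 = 2 * 5 has an even number of prime factors, 8 = 2^3 an
# odd one. (Assumes this file lives next to TheAlgorithms-style
# `maths/prime_factors.py`, as the import above already requires.)
def _demo_liouville() -> None:
    assert liouville_lambda(10) == 1
    assert liouville_lambda(8) == -1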
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_a : Dict= logging.get_logger(__name__)
class UpperCamelCase ( __A ):
def __init__(self : int , *_A : Any , **_A : Any) -> Dict:
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _A , )
super().__init__(*_A , **_A)
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co./huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co./models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra trained model will work
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.insert(1, model)
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit, optionally restricted to the fields in `wanted_data`."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }

    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
"""simple docstring"""
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Any = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = 0
while b > 0:
if b & 1:
lowerCAmelCase_ :str = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
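

# Added spot checks comparing against Python's built-in operators.
if __name__ == "__main__":
    assert binary_multiply(10, 1337) == 10 * 1337
    assert binary_mod_multiply(10, 1337, 7) == (10 * 1337) % 7
    print("binary multiplication checks passed")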
"""Find the least row length whose block fill-count function first exceeds one million."""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
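

# Added usage sketch (through the public API; the ViT/GPT-2 pairing is just an
# illustrative choice of encoder and decoder):
#
#   from transformers import AutoConfig, VisionEncoderDecoderConfig
#   encoder = AutoConfig.for_model("vit")
#   decoder = AutoConfig.for_model("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True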
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` units with blocks of length 2 to 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
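
    # Note: `seed` drives both the random.Random instance used for the dummy
    # tensors and the torch generator, so get_dummy_inputs is deterministic
    # per seed and the batched/single-sample comparisons below are meaningful.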
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 39 |
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co./bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co./bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co./{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
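
# For example (sketch): a config docstring containing the markdown link
# `[bert-base-uncased](https://huggingface.co./bert-base-uncased)` yields the
# checkpoint name "bert-base-uncased", because the link matches the name.
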
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 52 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" ,[
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] ,)
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : int = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" ,"w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
UpperCAmelCase_ : str = DatasetInfosDict.from_directory(A__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" ,[
DatasetInfo(),
DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,),
] ,)
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Dict = str(A__ )
dataset_info.write_to_directory(A__ )
UpperCAmelCase_ : Union[str, Any] = DatasetInfo.from_directory(A__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A__ ,"dataset_info.json" ) )
def snake_case ( ):
UpperCAmelCase_ : Any = DatasetInfo(
description="foo" ,citation="bar" ,homepage="https://foo.bar" ,license="CC0" ,features=Features({"a": Value("int32" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train", "num_examples": 42}] ,download_checksums={} ,download_size=13_37 ,post_processing_size=4_42 ,dataset_size=12_34 ,size_in_bytes=13_37 + 4_42 + 12_34 ,)
UpperCAmelCase_ : Any = dataset_info._to_yaml_dict()
assert sorted(A__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
UpperCAmelCase_ : List[Any] = yaml.safe_dump(A__ )
UpperCAmelCase_ : Tuple = yaml.safe_load(A__ )
assert dataset_info_yaml_dict == reloaded
def snake_case ( ):
UpperCAmelCase_ : Optional[Any] = DatasetInfo()
UpperCAmelCase_ : Dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" ,[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=13_37 ),
} ),
] ,)
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[str] = str(A__ )
dataset_infos_dict.write_to_directory(A__ )
UpperCAmelCase_ : List[Any] = DatasetInfosDict.from_directory(A__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
UpperCAmelCase_ : Union[str, Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
UpperCAmelCase_ : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A__ ,"README.md" ) )
| 355 |
"""simple docstring"""
import numpy as np
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : Tuple = int(np.ceil((x_end - xa) / h ) )
UpperCAmelCase_ : Optional[Any] = np.zeros((n + 1,) )
UpperCAmelCase_ : List[Any] = ya
UpperCAmelCase_ : Optional[int] = xa
for k in range(A__ ):
UpperCAmelCase_ : List[str] = f(A__ ,y[k] )
UpperCAmelCase_ : Any = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCAmelCase_ : Union[str, Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCAmelCase_ : Dict = f(x + h ,y[k] + h * ka )
UpperCAmelCase_ : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
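
# Example usage (sketch): integrate y' = y from x = 0 to 1 with y(0) = 1.
# The exact solution at x = 1 is e ~= 2.71828; RK4 with h = 0.01 matches it
# to several decimal places:
#
#     y = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
#     print(y[-1])  # ~2.71828
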
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency list of a directed graph whose edge weights are 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque and 1-weight
        # edges to the back, which keeps the deque ordered by distance.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
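
# Example usage (sketch):
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     g.get_shortest_path(0, 2)  # -> 1, since the 0-weight edge is free
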
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
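
# For example (sketch): normalize_text("Hello, World!") -> "hello world",
# since punctuation is stripped and the text is lower-cased.
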
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
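
# Example invocation (sketch; the ids below are placeholders):
#   python eval.py --model_id <org/model> --dataset <org/dataset> \
#       --config <config> --split test --log_outputs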
| 14 | 1 |
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
| 201 |
def move_tower(height, from_pole, to_pole, with_pole):
    # Move `height` disks from `from_pole` to `to_pole`, using `with_pole`
    # as the auxiliary pole.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
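
# Moving a tower of n disks takes 2**n - 1 disk moves: each call makes one
# move and recurses twice on a tower of n - 1.
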
def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 201 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    # Map fairseq (audiocraft) parameter names onto the HF MusicGen layout.
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
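
# For example (sketch):
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"
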
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    # Rename all keys and partition the state dict into the decoder (LM)
    # weights and the encoder-to-decoder projection weights.
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 130 |
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 130 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
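
# _LazyModule replaces this package in sys.modules, so the framework-specific
# submodules listed in _import_structure are only imported when one of their
# attributes (e.g. EncoderDecoderModel) is first accessed.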
| 243 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 243 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co./docs/evaluate"):
        func(*args)
| 59 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
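
# Weight norm has to be enabled (apply_weight_norm above) while the weight_g /
# weight_v tensors are copied in, and removed afterwards so the converted
# model stores plain fused weights.
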
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 350 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 182 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
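
# check_models_equal compares the two models parameter tensor by parameter
# tensor, tolerating up to 1e-4 of summed absolute numerical drift per tensor.
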
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 109 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # Column of the CoNLL file that holds the NER tag
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path):
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # Chunking labels live in the second-to-last column of the CoNLL file
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path):
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 253 | 0 |
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction):
    # Swap the two elements when they violate the requested order
    # (direction == 1 means ascending, direction == 0 means descending)
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array, low, length, direction):
    # Recursively merge a bitonic sequence of `length` elements starting at `low`
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array, low, length, direction):
    # Sort one half ascending and the other descending, then merge the result
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
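# Note: classic bitonic sort only works when the number of elements is a power
# of two. A minimal guard (an addition for illustration, not part of the
# original script) could be:
#
#   assert len(unsorted) > 0 and len(unsorted) & (len(unsorted) - 1) == 0, \
#       "bitonic sort requires a power-of-two input length"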
| 359 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount as a percentage of MRP: (MRP - price) / MRP * 100
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # The assignment targets of the next two blank fills were lost in extraction;
    # clearing empty price/MRP cells is the most plausible reconstruction.
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
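# Discount sanity check (standalone arithmetic mirroring the formula above):
# an MRP of 1000 with a current price of 750 yields a 25% discount.
#
#   mrp, price = 1000.0, 750.0
#   assert ((mrp - price) / mrp) * 100 == 25.0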
| 300 | 0 |
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
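# Example (a sanity sketch, not part of the original file):
#
#   fizz_buzz(1, 7)  # -> "1 2 Fizz 4 Buzz Fizz 7 "  (note the trailing spaces)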
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # Flag any `open(...)` call that neither passes an encoding nor uses a binary/write mode
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
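# Illustration of what the encoding regex flags (hypothetical lines, for clarity):
#
#   ' open("data.txt")'              -> matched: no encoding and no binary/write mode
#   ' open("data.txt", "rb")'        -> skipped: "rb" appears in the call
#   ' open(path, encoding="utf-8")'  -> skipped: "encoding" appears in the call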
| 201 | 1 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 1
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 0
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Toggle the bit at `position`
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # True when the bit at `position` is 1
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return 1 or 0 depending on the bit at `position`
    return int((number & (1 << position)) != 0)
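# Worked example on 0b1010 (decimal 10):
#
#   set_bit(0b1010, 0)     # -> 0b1011 (11)
#   clear_bit(0b1010, 1)   # -> 0b1000 (8)
#   flip_bit(0b1010, 3)    # -> 0b0010 (2)
#   is_bit_set(0b1010, 3)  # -> True
#   get_bit(0b1010, 2)     # -> 0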
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 275 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
'''simple docstring'''
@property
    def dummy_uncond_unet(self) -> UNet2DModel:
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self) -> Any:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_karras_ve_pipeline(self) -> str:
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 275 | 1 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 243 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
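# Mapping walk-through (illustrative, follows the loading code below): a fairseq
# key such as "encoder.layers.3.self_attn.k_proj.weight" matches "self_attn.k_proj"
# above, the "*" captures the layer index "3", and the final HF target becomes
# "unispeech.encoder.layers.3.attention.k_proj.weight" with weight_type "weight".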
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 243 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    def __init__(self) -> str:
        # Record every event name so tests can assert on the exact sequence
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
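# Usage sketch (hypothetical, mirrors the tests below): attach the recorder,
# train, then assert on the ordered event trace.
#
#   trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
#   trainer.train()
#   events = trainer.callback_handler.callbacks[-2].events
#   assert events[:2] == ["on_init_end", "on_train_begin"]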
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self) -> str:
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self) -> List[str]:
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2) -> Optional[Any]:
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer) -> Optional[Any]:
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self) -> Tuple:
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self) -> Dict:
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self) -> Optional[int]:
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 16 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Dict = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : List[Any] = scope
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : int = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__UpperCAmelCase : Tuple = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = True
__UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
__UpperCAmelCase : Optional[int] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
__UpperCAmelCase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCAmelCase : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
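        # The check above validates the KV cache: decoding the extended sequence in
        # one pass must match incremental decoding with past_key_values on the new
        # tokens, up to a small float tolerance.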
    def prepare_config_and_inputs_for_common(self) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : List[str] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : List[str] = False
    def setUp(self) -> Tuple:
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def __A ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : str = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = 3
__UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
__UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = 3
__UpperCAmelCase : Optional[Any] = """single_label_classification"""
__UpperCAmelCase : int = input_dict["""input_ids"""]
__UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = 3
__UpperCAmelCase : str = """multi_label_classification"""
__UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""]
__UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def __A ( self , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
__UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state
__UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0}
__UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
__UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state
__UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
__UpperCAmelCase : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
__UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
__UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
__UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) )
__UpperCAmelCase : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """
__UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
__UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" )
__UpperCAmelCase : int = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase )
# greedy generation outputs
__UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 16 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    # Γ(num) via the integral definition: ∫₀^∞ x^(num-1) · e^(-x) dx
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
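# Sanity check (math fact: Γ(n) = (n-1)! for positive integers n):
#
#   round(gamma(5))  # -> 24, since Γ(5) = 4! = 24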
| 182 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
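    # `serving` above is what gets exported as the SavedModel signature in the tests
    # below; the tf.string input_signature lets the saved graph take raw text end to end.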
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # required for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 182 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co./sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co./models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
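# Minimal usage sketch (DeformableDetrModel is assumed from the surrounding
# transformers package, not defined in this file):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   print(config.num_attention_heads)  # -> 8, aliased via attribute_map
#   model = DeformableDetrModel(config)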
| 369 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the dataset. Explore datasets at: hf.co/datasets.")
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.")
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.")
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.")
    args = parser.parse_args()
    return args
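# Example invocation (script and bucket names are illustrative):
#   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --output_dir gs://my-tf-tpu-bucket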
def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples["text"])
    return fn
def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        serialized_example = example.SerializeToString()
        records.append(serialized_example)
    return records
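# Each record above is one serialized tf.train.Example proto: a feature map from
# "input_ids"/"attention_mask" to an Int64List, which is the layout that
# tf.data.TFRecordDataset consumers parse back with tf.io.parse_single_example.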
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'Limiting the dataset to {args.limit} entries.')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
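    # Worked example of the chunking above with max_length=512: a batch that
    # concatenates to 1300 tokens gives total_length = (1300 // 512) * 512 = 1024,
    # i.e. two full 512-token samples; the trailing 276 tokens are dropped.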
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f'dataset-{shard_count}-{records_containing}.tfrecord')
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f'split-{args.split}-records-count.txt', "w") as f:
        print(f'Total {args.split} records: {total_records}', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 49 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around Google Research's reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    """simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 335 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    """simple docstring"""
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        '''simple docstring'''
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"""ctrl""": """https://huggingface.co./ctrl/resolve/main/config.json"""}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
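# Minimal usage sketch (CTRLModel is assumed from the surrounding transformers
# package, not defined in this file):
#   config = CTRLConfig(n_layer=12)   # smaller than the 48-layer default
#   print(config.hidden_size)         # -> 1280, resolved via attribute_map to n_embd
#   model = CTRLModel(config)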
| 359 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase : Tuple = """<<<<<<< This should probably be modified because it mentions: """
lowercase : Any = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
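# Example CLI usage (paths are illustrative):
#   datasets-cli convert --tfds_path ./tfds/my_dataset.py --datasets_directory ./hf_datasets/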
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        '''simple docstring'''
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.")
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.")
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        '''simple docstring'''
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        '''simple docstring'''
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                        tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                        out_line = "from . import " + match.group(1)
                    # Check we have not forgotten anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 285 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model
    @property
    def dummy_vae(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        return CLIPTextModel(config)
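    # The three properties above build deliberately tiny stand-ins (32-channel UNet/VAE,
    # 5-layer CLIP) so the fast tests below can run the full pipeline on CPU in seconds.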
    def test_stable_diffusion_upscale(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_upscale_fp16(self):
        '''simple docstring'''
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type='np', ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        '''simple docstring'''
        image = load_image(
            'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        expected_image = load_numpy(
            'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        '''simple docstring'''
        image = load_image(
            'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        expected_image = load_numpy(
            'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 275 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co./microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
| 275 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
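# Example: `RUN_SLOW=yes python -m pytest tests/` flips the slow-test gate read below.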
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss' )(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model) )(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
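# Example: apply the same gates to every test method of a class at once:
#   @for_all_test_methods(slow, require_faiss)
#   class MyIndexTests(unittest.TestCase): ...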
class RequestWouldHangIndefinitelyError(Exception):
    '''simple docstring'''
    pass
class OfflineSimulationMode(Enum):
    '''simple docstring'''
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
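# A hedged usage sketch (illustrative; `load_dataset` stands in for any networked call):
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=0.5):
#         load_dataset("squad")  # fails fast with a prettified offline timeout error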
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 1_00, 10).tolist() == deepcopy(rng2).integers(0, 1_00, 10).tolist()
def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
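# A hedged usage sketch (hypothetical test): flaky Hub 500/502 responses become
# xfails instead of hard failures.
#
#     @xfail_if_500_502
#     def test_push_to_hub():
#         ...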
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=True, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(R'^gw', '', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
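# Illustrative trace (assumed values): under pytest-xdist, worker "gw0" yields id 0
# and port 29500, "gw1" yields id 1 and port 29501, so concurrent workers can each
# bind their own torch.distributed rendezvous port without colliding.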
| 118 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs, ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self):
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                    written += num_rows
        return written
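# A hedged usage sketch (assumes the public `datasets` wrappers around the
# reader/writer above, Dataset.from_sql / Dataset.to_sql):
#
#     import sqlite3
#     from datasets import Dataset
#     con = sqlite3.connect("data.db")
#     Dataset.from_dict({"a": [1, 2, 3]}).to_sql("my_table", con)   # SqlDatasetWriter path
#     ds = Dataset.from_sql("SELECT * FROM my_table", con)          # SqlDatasetReader path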
| 118 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')
    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')
    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')
    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cba, cbb in zip(cbs1, cbs2):
            if isinstance(cba, type) and isinstance(cbb, type):
                self.assertEqual(cba, cbb)
            elif isinstance(cba, type) and not isinstance(cbb, type):
                self.assertEqual(cba, cbb.__class__)
            elif not isinstance(cba, type) and isinstance(cbb, type):
                self.assertEqual(cba.__class__, cbb)
            else:
                self.assertEqual(cba, cbb)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps', )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
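# Worked example from the Project Euler 22 statement: COLIN has word value
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# so it contributes 938 * 53 = 49714 to the total score.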
| 16 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """simple docstring"""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
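# Illustrative example (hypothetical dict file): a fairseq dict containing the
# lines "hello 42" and "world 7" produces
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.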
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    """simple docstring"""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10_224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 115 |
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ], )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
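# Note: torch.utils.cpp_extension.load JIT-compiles the sources on first call and
# caches the build, so a minimal (hypothetical) call site is simply:
#
#     MSDA = load_cuda_kernels()
#     # the extension then exposes the deformable attention forward/backward kernels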
| 115 | 1 |
"""simple docstring"""
from __future__ import annotations
def _A ( UpperCamelCase_ : list[int | str]) -> None:
'''simple docstring'''
create_state_space_tree(UpperCamelCase_, [], 0, [0 for i in range(len(UpperCamelCase_))])
def _A ( UpperCamelCase_ : list[int | str], UpperCamelCase_ : list[int | str], UpperCamelCase_ : int, UpperCamelCase_ : list[int], ) -> None:
'''simple docstring'''
if index == len(UpperCamelCase_):
print(UpperCamelCase_)
return
for i in range(len(UpperCamelCase_)):
if not index_used[i]:
current_sequence.append(sequence[i])
__lowercase = True
create_state_space_tree(UpperCamelCase_, UpperCamelCase_, index + 1, UpperCamelCase_)
current_sequence.pop()
__lowercase = False
_a = [3, 1, 2, 4]
generate_all_permutations(sequence)
_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
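# The backtracking tree explores n! leaves: [3, 1, 2, 4] prints all 24 orderings,
# and ["A", "B", "C"] prints the 6 arrangements ['A', 'B', 'C'], ['A', 'C', 'B'],
# ['B', 'A', 'C'], ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A'].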
| 17 |
import os
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'{len(upper_files)} files contain uppercase characters:')
    print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f'{len(space_files)} files contain space characters:')
    print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f'{len(hyphen_files)} files contain hyphen characters:')
    print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'{len(nodir_files)} files are not in a directory:')
    print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
| 49 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__SCREAMING_SNAKE_CASE : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _a ( ) -> Tuple:
snake_case_ = os.path.dirname(os.path.realpath(a__ ) )
snake_case_ = os.path.join(a__ , """words.txt""" )
snake_case_ = """"""
with open(a__ ) as f:
snake_case_ = f.readline()
snake_case_ = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
snake_case_ = [
word
for word in [sum(ord(a__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a__ )
if __name__ == "__main__":
print(solution())
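# Worked example from the Project Euler 42 statement: SKY has word value
# 19 + 11 + 25 = 55, the tenth triangular number, so it counts as a
# triangle word and is kept by the filter above.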
| 361 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co./google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co./models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0XE000, eos_token_id=0XE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 233 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 281 |
def mf_knapsack(i, wt, val, j):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    '''simple docstring'''
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 285 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(datasets, probabilities=None, seed=None, info=None, split=None, stopping_strategy="first_exhausted", ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(dsets, info=None, split=None, axis=0, ):
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 170 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path, question_encoder_name_or_path, dest_dir, config_name_or_path=None, generator_tokenizer_name_or_path=None, question_encoder_tokenizer_name_or_path=None, ):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 170 | 1 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i):
    return F'''{i * "  "}*''' if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i)} {new_part.replace("_", " ").title()}''')
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = F'''{filepath}/{filename}'''.replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(F'''{md_prefix(indent)} [{filename}]({url})''')
if __name__ == "__main__":
    print_directory_md(".")
| 118 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co./yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = 'retribert'
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 118 | 1 |
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
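# Worked example (illustrative): shear_stress(stress=0, tangential_force=100, area=20)
# solves for the missing quantity and returns ("stress", 5.0), since tau = F / A = 100 / 20.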
| 41 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"naver-clova-ix/donut-base": "https://huggingface.co./naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co./models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = 'donut-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 41 | 1 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCamelCase__ :
"""simple docstring"""
pass
| 115 |
"""simple docstring"""
from collections.abc import Callable
def lowerCamelCase ( _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
__UpperCAmelCase : float = a
__UpperCAmelCase : float = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
__UpperCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
__UpperCAmelCase : Union[str, Any] = mid
else:
__UpperCAmelCase : List[Any] = mid
__UpperCAmelCase : Optional[Any] = start + (end - start) / 2.0
return mid
def lowerCamelCase ( _UpperCamelCase : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
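# Sanity check (known root): f(x) = x**3 - 2*x - 5 changes sign on [1, 1000] and has
# its real root near x = 2.0945514, which the bisection above approximates to 1e-7.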
| 115 | 1 |
def triangle_number_generator():
    """simple docstring"""
    for n in range(1, 1_0_0_0_0_0_0):
        yield n * (n + 1) // 2
def count_divisors(n):
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0)
if __name__ == "__main__":
    print(solution())
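# Worked example from the Project Euler 12 statement: 28 = 1 + 2 + ... + 7 is the
# first triangle number with more than five divisors (1, 2, 4, 7, 14, 28); the
# code above applies the same divisor count, derived from the prime factorization,
# until it exceeds 500.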
| 365 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co./snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'efficientformer'
def __init__( self : Optional[int] ,__lowerCamelCase : List[int] = [3, 2, 6, 4] ,__lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] ,__lowerCamelCase : List[bool] = [True, True, True, True] ,__lowerCamelCase : int = 4_48 ,__lowerCamelCase : int = 32 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : int = 7 ,__lowerCamelCase : int = 5 ,__lowerCamelCase : int = 8 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 16 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : bool = True ,__lowerCamelCase : bool = True ,__lowerCamelCase : float = 1e-5 ,__lowerCamelCase : str = "gelu" ,__lowerCamelCase : float = 0.02 ,__lowerCamelCase : float = 1e-12 ,__lowerCamelCase : int = 2_24 ,__lowerCamelCase : float = 1e-05 ,**__lowerCamelCase : Dict ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_act
a = hidden_dropout_prob
a = hidden_sizes
a = num_hidden_layers
a = num_attention_heads
a = initializer_range
a = layer_norm_eps
a = patch_size
a = num_channels
a = depths
a = mlp_expansion_ratio
a = downsamples
a = dim
a = key_dim
a = attention_ratio
a = resolution
a = pool_size
a = downsample_patch_size
a = downsample_stride
a = downsample_pad
a = drop_path_rate
a = num_metaad_blocks
a = distillation
a = use_layer_scale
a = layer_scale_init_value
a = image_size
a = batch_norm_eps
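
# Minimal usage sketch (illustrative addition): every argument has a default,
# so the config can be built empty and serialized like any PretrainedConfig.
def _demo_efficientformer_config() -> None:
    config = EfficientFormerConfig(num_attention_heads=8)
    assert config.model_type == "efficientformer"
    print(config.to_json_string())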
| 330 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 27 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point via perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a point around the given axis ('x', 'y' or 'z') by `angle`."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 229 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""DeiTFeatureExtractor"""]
_lowercase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 |
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Double the key width once the lexicon size crosses a power of two.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Packs the bit string into bytes (with a stop marker) and writes them out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strips the length prefix that the matching compressor prepends."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses its contents and writes the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 170 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that define `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes appearing in `all_model_classes` in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read by mapping classes to their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
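
# Usage sketch (illustrative addition): to_json flattens classes and nested
# containers into printable names, so mappings built above can be dumped.
def _demo_to_json() -> None:
    assert to_json([int, {"key": (str,)}]) == ["int", {"key": ["str"]}]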
| 175 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co./facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co./models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096,
        num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1,
        attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02,
        scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 175 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
| 41 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Whether some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
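
# Usage sketch (illustrative addition): an O(n) build gives O(1) range-sum
# queries; contains_sum checks for a contiguous subarray with a given total.
def _demo_prefix_sum() -> None:
    ps = PrefixSum([1, 2, 3, 4, 5, 6])
    assert ps.get_sum(2, 4) == 3 + 4 + 5
    assert ps.contains_sum(9)  # the subarray [4, 5]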
| 41 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co./microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 45 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
return None, "\n".join(_UpperCAmelCase )
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 45 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 73 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of `sentence`, leaving the rest unchanged."""
    if not sentence:
        return ""

    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
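
# Worked examples (illustrative addition): only the first character is mapped
# through the lowercase -> uppercase table; anything else passes through.
def _demo_capitalize() -> None:
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 hello") == "123 hello"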
| 330 | 0 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I, using normalized a/b coefficients."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
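
# Usage sketch (illustrative addition): a first-order low-pass
# y[n] = (1 - k) * y[n - 1] + k * x[n], written in the a/b convention above
# as a = [1, -(1 - k)] and b = [k, 0]. The step response rises toward 1.0.
def _demo_iir_filter() -> None:
    lowpass = IIRFilter(1)
    k = 0.25
    lowpass.set_coefficients([1.0, -(1.0 - k)], [k, 0.0])
    print([round(lowpass.process(1.0), 4) for _ in range(4)])  # [0.25, 0.4375, ...]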
| 176 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co./snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7,
        num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3,
        pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2,
        downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1,
        distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12,
        image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 176 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 229 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Returns the bitwise AND of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
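
# Worked example (illustrative addition): operands are zero-padded to the
# longer binary width before AND-ing, e.g. 25 -> 011001 and 32 -> 100000.
def _demo_binary_and() -> None:
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(12, 10) == "0b1000"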
| 229 | 1 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Prints a path through `maze` from (0, 0) to (n-1, n-1) if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search helper; marks visited cells in `solutions`."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
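
# Usage sketch (illustrative addition): 0 marks an open cell and 1 a wall;
# solve_maze prints the visited-path matrix when a route exists from the
# top-left to the bottom-right corner.
def _demo_maze() -> None:
    assert solve_maze([[0, 1, 0], [0, 0, 0], [1, 1, 0]])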
| 48 |
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein distance via memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
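
# Worked example (illustrative addition): the classic kitten -> sitting case
# needs three edits (k->s, e->i, insert g), and an empty source needs one
# insertion per target character.
def _demo_edit_distance() -> None:
    assert min_distance_up_bottom("kitten", "sitting") == 3
    assert min_distance_up_bottom("", "abc") == 3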
| 48 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 175 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def get_config(model_name: str):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )

    return config
def rename_key(name: str):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 175 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
if self.has_attentions:
_UpperCamelCase : Any = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_UpperCamelCase : List[str] = copy.deepcopy(__a )
_UpperCamelCase : Dict = None
_UpperCamelCase : Dict = model_class(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_UpperCamelCase : Dict = copy.deepcopy(__a )
_UpperCamelCase : int = False
_UpperCamelCase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(**__a )
| 350 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : List[Any] , __a : str=13 , __a : Any=30 , __a : List[str]=2 , __a : Dict=3 , __a : Union[str, Any]=True , __a : Dict=True , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : str=0.1 , __a : Optional[int]=0.1 , __a : Union[str, Any]=10 , __a : Optional[Any]=0.02 , __a : List[Any]=None , __a : str=2 , ) -> int:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : Optional[int] = num_patches + 1
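        # With the tester defaults above (image_size=30, patch_size=2) this works
        # out to (30 // 2) ** 2 = 225 patches, i.e. a sequence length of 226.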
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Union[str, Any] = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = ViTModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[int] , __a : int ) -> Optional[int]:
_UpperCamelCase : Tuple = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple , __a : int , __a : Dict ) -> int:
_UpperCamelCase : Any = self.type_sequence_label_size
_UpperCamelCase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
        (
            _UpperCamelCase
        ), (
            _UpperCamelCase
        ), (
            _UpperCamelCase
        ) = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Dict = ViTModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : List[Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__a )
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Dict = model(**__a )
# verify the logits
_UpperCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_UpperCamelCase : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__a )
_UpperCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(__a , interpolate_pos_encoding=__a )
# verify the logits
_UpperCamelCase : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
_UpperCamelCase : int = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
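    # Why 3601 above: facebook/dino-vits8 uses 8x8 patches, so a 480x480 input
    # yields (480 // 8) ** 2 = 3600 patch tokens, plus one [CLS] token.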
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCamelCase : int = self.default_image_processor
_UpperCamelCase : Dict = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__a , return_tensors="pt" )
_UpperCamelCase : Any = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : int = model(__a )
| 310 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase ( inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
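# Usage sketch (relies on the de-duplicated parameter names above): pass 0 for
# the unknown quantity. Since X_L = 2*pi*f*L, at f = 10 kHz and X_L = 50 ohm the
# missing inductance is 50 / (2*pi*10_000) ~= 7.96e-4 H.
assert abs(lowercase(0, 10_000, 50)["inductance"] - 50 / (2 * pi * 10_000)) < 1e-12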
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def lowercase ( year : int , month : int , day : int ) -> str:
assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
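# Worked example for the computation above: for 2020-10-24, century_anchor = 2,
# the year-level anchor is (1 + 8 + 2 + 2) % 7 = 6, October's leap-year doomsday
# offset is 3, so (6 + 24 - 3) % 7 = 6 -> "Saturday".
assert lowercase(2020, 10, 24) == "Saturday"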
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
def nand_gate (input_a : int , input_b : int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(0 ) != 0 )
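# Equivalent formulation: NAND is the negation of AND, i.e. int(not (input_a and input_b)).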
def A () -> None:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        self.block_size = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
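# A minimal sketch of the behaviour these tests pin down for
# utils_summarization.truncate_or_pad -- inferred from the assertions above,
# not the module's actual implementation:
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]  # too long: hard-truncate to block_size
    return sequence + [pad_token_id] * (block_size - len(sequence))  # too short: right-pad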
| 7 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def A_ ( self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
SCREAMING_SNAKE_CASE__ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE__ = {'unk_token': '[UNK]'}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
def A_ ( self : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE__ = 'lower newer'
SCREAMING_SNAKE_CASE__ = 'lower newer'
return input_text, output_text
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = 'lower newer'
SCREAMING_SNAKE_CASE__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer('Hello' , 'World' )
SCREAMING_SNAKE_CASE__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , UpperCAmelCase_ )
@slow
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('microsoft/deberta-base' )
SCREAMING_SNAKE_CASE__ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = [tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) for seq in encoding['input_ids']]
# fmt: off
SCREAMING_SNAKE_CASE__ = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
SCREAMING_SNAKE_CASE__ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , UpperCAmelCase_ )
for expected, decoded in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
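# How the toy merges defined in setUp compose (a hand-traced sketch of byte-level
# BPE, not tokenizer internals): '\u0120' marks a leading space, and the merges
# '\u0120 l' -> '\u0120l' -> '\u0120lo' -> '\u0120low' only apply to
# space-prefixed words; a sentence-initial 'lower' carries no '\u0120', so only
# the 'e r' merge fires, giving the ['l', 'o', 'w', 'er', ...] split asserted above.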
| 176 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__snake_case = NewType("""DataClass""", Any)
__snake_case = NewType("""DataClassType""", Any)
def string_to_bool ( UpperCamelCase_ ) -> bool:
    '''simple docstring'''
    if isinstance(UpperCamelCase_ , bool ):
        return UpperCamelCase_
    if UpperCamelCase_.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif UpperCamelCase_.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {UpperCamelCase_} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
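# Usage sketch for string_to_bool above: case-insensitive, argparse-friendly.
#   string_to_bool("YES") -> True ; string_to_bool("0") -> False
#   string_to_bool(True)  -> True ; string_to_bool("maybe") raises ArgumentTypeError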
def make_choice_type_function ( UpperCamelCase_ ) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in UpperCamelCase_}
    return lambda UpperCamelCase_ : str_to_choice.get(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **UpperCamelCase_ , ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **UpperCamelCase_ )
class lowercase__ ( ArgumentParser ):
    dataclass_types : Iterable[DataClassType]
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Optional[Any] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE__ = ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_ )
if dataclasses.is_dataclass(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [dataclass_types]
SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_ )
@staticmethod
def A_ ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field ):
SCREAMING_SNAKE_CASE__ = F'--{field.name}'
SCREAMING_SNAKE_CASE__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
SCREAMING_SNAKE_CASE__ = kwargs.pop('aliases' , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [aliases]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , 'UnionType' ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE__ = (
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE__ = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE__ = field.type.__args__
else:
SCREAMING_SNAKE_CASE__ = [x.value for x in field.type]
SCREAMING_SNAKE_CASE__ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
else:
SCREAMING_SNAKE_CASE__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE__ = copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE__ = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE__ = '?'
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE__ = True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = field.type.__args__[0]
SCREAMING_SNAKE_CASE__ = '+'
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
else:
SCREAMING_SNAKE_CASE__ = True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE__ = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **UpperCAmelCase_ )
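    # Net effect of the complement flag above (an illustration, not code from this
    # file): a dataclass field `foo: bool = True` yields both a `--foo` argument
    # that optionally takes a truthy/falsy value and a `--no_foo` store_false
    # argument writing into the same `foo` destination.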
def A_ ( self : List[Any] , UpperCAmelCase_ : DataClassType ):
if hasattr(UpperCAmelCase_ , '_argument_group_name' ):
SCREAMING_SNAKE_CASE__ = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE__ = self
try:
SCREAMING_SNAKE_CASE__ = get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '.'.join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
SCREAMING_SNAKE_CASE__ = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE__ = []
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE__ = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = args_file_parser.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip('-' ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE__ = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def A_ ( self : str , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = set(args.keys() )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}' )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
with open(Path(UpperCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
SCREAMING_SNAKE_CASE__ = json.loads(open_json_file.read() )
SCREAMING_SNAKE_CASE__ = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
| 176 | 1 |
import qiskit
def __magic_name__ ( __a : int = 2 ):
    '''simple docstring'''
    classical_bits = __a
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(__a , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , __a ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(__a ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    return job.result().get_counts(circuit )
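# For the n-qubit GHZ state prepared above, the 1_000 shots should split (up to
# sampling noise) between all-zeros and all-ones, e.g. __magic_name__(3) returns
# roughly {'000': ~500, '111': ~500}.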
if __name__ == "__main__":
    print(f'Total count for various states are: {__magic_name__(3)}')
| 356 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
UpperCamelCase__ = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCamelCase__ = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.001 )
UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = """"""
for new_text in streamer:
streamer_text += new_text
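# The pattern these tests exercise, as a standalone sketch (the checkpoints are
# the tests' own tiny ones; this is not part of the test class):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate,
#                   kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer})
#   thread.start()
#   text = "".join(streamer)  # yields per decoded chunk; raises queue.Empty on timeout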
| 178 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
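# What the _LazyModule indirection above buys, as a simplified sketch of the idea
# (not transformers' actual implementation): attribute access triggers the import.
#
#   class _LazySketch(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._attr_to_module[name]            # e.g. 'tokenization_layoutxlm'
#           module = importlib.import_module('.' + submodule, __package__)
#           return getattr(module, name)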
| 48 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,config_file ,pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
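    # Example invocation (the script name and paths are placeholders):
    #   python convert_t5_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/pytorch_model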
| 48 | 1 |
def UpperCamelCase__ ( principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('Years to repay must be an integer > 0' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
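# Worked example (relies on the de-duplicated parameter names above): 10_000
# borrowed at 6% a year over 1 year gives rate_per_month = 0.005 and 12 payments,
# so the instalment is 10_000 * 0.005 * 1.005**12 / (1.005**12 - 1) ~= 860.66.
assert round(UpperCamelCase__(10_000, 0.06, 1), 2) == 860.66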
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase_ = logging.get_logger(__name__)
class A_ ( ChineseCLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 194 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_rembert"""] = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_rembert_fast"""] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_rembert"""] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_rembert"""] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__A : Any = {
'vocab_file': {
'camembert-base': 'https://huggingface.co./camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co./camembert-base/resolve/main/tokenizer.json',
},
}
__A : List[Any] = {
'camembert-base': 512,
}
__A : Union[str, Any] = '▁'
class __UpperCamelCase ( lowercase__ ):
lowercase : Dict = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ['input_ids', 'attention_mask']
lowercase : List[str] = CamembertTokenizer
def __init__( self :Optional[Any] ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Any=None ,_UpperCamelCase :List[Any]="<s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :Dict="<s>" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :List[str]="<pad>" ,_UpperCamelCase :Union[str, Any]="<mask>" ,_UpperCamelCase :Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] ,**_UpperCamelCase :str ,):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : List[str] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token
super().__init__(
_UpperCamelCase ,tokenizer_file=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Dict = vocab_file
snake_case_ : Tuple = False if not self.vocab_file else True
def a__ ( self :List[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
snake_case_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Any = os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file ,_UpperCamelCase )
        return (out_vocab_file,)
| 361 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__A : Tuple = logging.get_logger(__name__)
__A : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : str = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co./facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co./facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co./facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
__A : Optional[Any] = {
'facebook/blenderbot_small-90M': 512,
}
class __UpperCamelCase ( lowercase__ ):
lowercase : str = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Dict = BlenderbotSmallTokenizer
def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Any = add_prefix_space
def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ):
snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ):
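        # Blenderbot does not use token type ids, so the mask is all zeros
        # for both single sequences and pairs.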
snake_case_ : int = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 8 | 0 |
def nand_gate(input_a: int, input_b: int) -> int:
    """NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_a, input_b).count(0) != 0)
def test_nand_gate() -> None:  # test name reconstructed; the original identifier was obfuscated
    """Exercise the full NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 7 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowercase_ = False
@skip_mps
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = StableDiffusionAttendAndExcitePipeline
lowerCamelCase = False
lowerCamelCase = TEXT_TO_IMAGE_PARAMS
lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def snake_case__ ( cls : Any )-> Optional[Any]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowercase_ )
@classmethod
def snake_case__ ( cls : Optional[Any] )-> Dict:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowercase_ )
def snake_case__ ( self : List[str] )-> int:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,)
A__ = DDIMScheduler(
beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,)
A__ = CLIPTextModel(lowercase_ )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int:
'''simple docstring'''
if str(lowercase_ ).startswith('mps' ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
        A__ = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def snake_case__ ( self : List[str] )-> Optional[Any]:
'''simple docstring'''
A__ = 'cpu'
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
A__ = self.get_dummy_inputs(lowercase_ )
A__ = pipe(**lowercase_ ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 6_4, 6_4, 3) )
A__ = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_,1E-3 )
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def snake_case__ ( self : str )-> int:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 )
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Any )-> Optional[int]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowercase_ )
@classmethod
def snake_case__ ( cls : int )-> List[Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowercase_ )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Union[str, Any] )-> List[Any]:
'''simple docstring'''
A__ = torch.manual_seed(5_1 )
A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa )
pipe.to('cuda' )
A__ = 'a painting of an elephant with glasses'
A__ = [5, 7]
A__ = pipe(
prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0]
A__ = load_numpy(
'https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 7 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
lowercase_ = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
lowercase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = []
lowercase_ = fairseq_model.state_dict()
lowercase_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase_ = None
for name, value in fairseq_dict.items():
lowercase_ = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
lowercase_ = True
elif name.split("." )[0] == "proj":
lowercase_ = fairseq_model.proj
lowercase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase_ = True
if "*" in mapped_key:
lowercase_ = name.split(_UpperCAmelCase )[0].split("." )[-2]
lowercase_ = mapped_key.replace("*" , _UpperCAmelCase )
if "weight_g" in name:
lowercase_ = 'weight_g'
elif "weight_v" in name:
lowercase_ = 'weight_v'
elif "bias" in name:
lowercase_ = 'bias'
elif "weight" in name:
lowercase_ = 'weight'
else:
lowercase_ = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
continue
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Tuple ):
'''simple docstring'''
lowercase_ = full_name.split("conv_layers." )[-1]
lowercase_ = name.split("." )
lowercase_ = int(items[0] )
lowercase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
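    # Build a linear layer whose weight is taken from the embedding matrix,
    # so the decoder's output projection reuses the input embeddings.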
lowercase_ = emb.weight.shape
lowercase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowercase_ = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
with open(_UpperCAmelCase , "r" , encoding="utf-8" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.split(" " )[0] for line in lines]
lowercase_ = len(_UpperCAmelCase )
lowercase_ = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_UpperCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: str , ):
'''simple docstring'''
lowercase_ = WavaVecaConfig.from_pretrained(_UpperCAmelCase )
lowercase_ = SpeechaTextaConfig.from_pretrained(
_UpperCAmelCase , vocab_size=_UpperCAmelCase , decoder_layers=_UpperCAmelCase , do_stable_layer_norm=_UpperCAmelCase )
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowercase_ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase_ = WavaVecaModel(_UpperCAmelCase )
lowercase_ = recursively_load_weights_wavaveca(model.encoder , _UpperCAmelCase )
lowercase_ = SpeechaTextaForCausalLM(_UpperCAmelCase )
lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCAmelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
lowercase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowercase_ = SpeechEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
lowercase_ = False
# add projection layer
lowercase_ = nn.Parameter(projection_layer.weight )
lowercase_ = nn.Parameter(projection_layer.bias )
lowercase_ = create_vocab_dict(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , "vocab.json" ) , "w" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
lowercase_ = SpeechaTextaTokenizer(os.path.join(_UpperCAmelCase , "vocab.json" ) )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase_ = hf_wavavec.config.to_dict()
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer.eos_token_id
lowercase_ = 'speech_to_text_2'
lowercase_ = 'wav2vec2'
lowercase_ = SpeechEncoderDecoderConfig.from_dict(_UpperCAmelCase )
hf_wavavec.save_pretrained(_UpperCAmelCase )
feature_extractor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 351 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = num_attention_heads * attention_head_dim
lowercase_ = in_channels
lowercase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
# 3. Define transformers blocks
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
for d in range(UpperCAmelCase )
] )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Optional[Any]:
'''simple docstring'''
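        # 1. Input: fold the frame axis out of the batch so self-attention
        # runs over time at each spatial location.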
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = batch_frames // num_frames
lowercase_ = hidden_states
lowercase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ = self.norm(UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.proj_in(UpperCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ = block(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
# 3. Output
lowercase_ = self.proj_out(UpperCAmelCase )
lowercase_ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase )
| 297 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE (snake_case_ , unittest.TestCase ):
"""simple docstring"""
__a =AlbertTokenizer
__a =AlbertTokenizerFast
__a =True
__a =True
__a =True
def UpperCamelCase__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
_a = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Dict , __a : Dict ):
_a = "this is a test"
_a = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self : Any ):
_a = "<pad>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[Any] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(__a ) , 3_00_00 )
def UpperCamelCase__ ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def UpperCamelCase__ ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = "I was born in 92000, and this is falsé."
_a = tokenizer.tokenize(__a )
_a = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
_a = tokenizer.encode(__a , add_special_tokens=__a )
_a = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(__a )
_a = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def UpperCamelCase__ ( self : Dict ):
_a = AlbertTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 12_89] )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def UpperCamelCase__ ( self : Tuple ):
_a = AlbertTokenizer(__a )
_a = tokenizer.encode("sequence builders" )
_a = tokenizer.encode("multi-sequence build" )
_a = tokenizer.build_inputs_with_special_tokens(__a )
_a = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase__ ( self : Dict ):
# fmt: off
_a = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 63 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def __UpperCAmelCase ( a_ , a_ , a_ , a_=None):
# Initialise PyTorch model
snake_case_ = XLNetConfig.from_json_file(a_)
snake_case_ = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
snake_case_ = finetuning_task
snake_case_ = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case_ = XLNetForSequenceClassification(a_)
elif "squad" in finetuning_task:
snake_case_ = finetuning_task
snake_case_ = XLNetForQuestionAnswering(a_)
else:
snake_case_ = XLNetLMHeadModel(a_)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a_ , a_ , a_)
# Save pytorch-model
snake_case_ = os.path.join(a_ , a_)
snake_case_ = os.path.join(a_ , a_)
print(f'''Save PyTorch model to {os.path.abspath(a_)}''')
torch.save(model.state_dict() , a_)
print(f'''Save configuration file to {os.path.abspath(a_)}''')
with open(a_ , 'w' , encoding='utf-8') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowercase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 178 | 0 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function of ``input_string``.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:  # name reconstructed; the original identifier was obfuscated
    """Length of the longest proper prefix of ``input_string`` that is also a suffix.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
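    # Quick demo (values verified by hand): the longest border of
    # "aabcdaabc" is "aabc", of length 4.
    print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
    print(longest_prefix("aabcdaabc"))  # 4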
| 370 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 42 | 0 |
"""simple docstring"""
import datasets
_a = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_a = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_a = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase( datasets.Metric ):
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
}) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
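        # XNLI is scored with plain accuracy over the predicted labels.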
return {"accuracy": simple_accuracy(__a , __a)}
| 194 |
"""simple docstring"""
import torch
from torch import nn
class _UpperCAmelCase( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a=1 , __a=False) -> Tuple:
'''simple docstring'''
super().__init__()
_UpperCamelCase = n_token
_UpperCamelCase = d_embed
_UpperCamelCase = d_proj
_UpperCamelCase = cutoffs + [n_token]
_UpperCamelCase = [0] + self.cutoffs
_UpperCamelCase = div_val
_UpperCamelCase = self.cutoffs[0]
_UpperCamelCase = len(self.cutoffs) - 1
_UpperCamelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters))
_UpperCamelCase = nn.ModuleList()
_UpperCamelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__a , __a)))
else:
self.out_projs.append(__a)
self.out_layers.append(nn.Linear(__a , __a))
else:
for i in range(len(self.cutoffs)):
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__a , __a)))
self.out_layers.append(nn.Linear(__a , r_idx - l_idx))
_UpperCamelCase = keep_order
def UpperCAmelCase ( self , __a , __a , __a , __a) -> Any:
'''simple docstring'''
if proj is None:
_UpperCamelCase = nn.functional.linear(__a , __a , bias=__a)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_UpperCamelCase = nn.functional.linear(__a , proj.t().contiguous())
_UpperCamelCase = nn.functional.linear(__a , __a , bias=__a)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCAmelCase ( self , __a , __a=None , __a=False) -> Dict:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
_UpperCamelCase = hidden[..., :-1, :].contiguous()
_UpperCamelCase = labels[..., 1:].contiguous()
_UpperCamelCase = hidden.view(-1 , hidden.size(-1))
_UpperCamelCase = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''')
else:
_UpperCamelCase = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
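            # Fast path: with no clusters configured this reduces to an
            # ordinary softmax over the full vocabulary.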
_UpperCamelCase = self._compute_logit(__a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_UpperCamelCase = labels != -1_00
_UpperCamelCase = torch.zeros_like(__a , dtype=hidden.dtype , device=hidden.device)
_UpperCamelCase = (
-nn.functional.log_softmax(__a , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_UpperCamelCase = nn.functional.log_softmax(__a , dim=-1)
else:
# construct weights and biases
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCamelCase = self.out_layers[i].weight
_UpperCamelCase = self.out_layers[i].bias
if i == 0:
_UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0)
_UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(__a)
biases.append(__a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[0], biases[0], self.out_projs[0]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
if labels is None:
_UpperCamelCase = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_UpperCamelCase = torch.zeros_like(__a , dtype=hidden.dtype , device=hidden.device)
_UpperCamelCase = 0
_UpperCamelCase = [0] + self.cutoffs
for i in range(len(__a) - 1):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCamelCase = labels.index_select(0 , __a) - l_idx
_UpperCamelCase = head_logprob.index_select(0 , __a)
_UpperCamelCase = hidden.index_select(0 , __a)
else:
_UpperCamelCase = hidden
if i == 0:
if labels is not None:
_UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''') and self.keep_order) or keep_order:
out.index_copy_(0 , __a , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
if self.n_clusters == 0:
_UpperCamelCase = self._compute_logit(__a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(__a , dim=-1)
else:
# construct weights and biases
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCamelCase = self.out_layers[i].weight
_UpperCamelCase = self.out_layers[i].bias
if i == 0:
_UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0)
_UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(__a)
biases.append(__a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[0], biases[0], self.out_projs[0]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = hidden.new_empty((head_logit.size(0), self.n_token))
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = [0] + self.cutoffs
for i in range(len(__a) - 1):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCamelCase = logprob_i
return out
| 194 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 359 |
import pprint
import requests
_A = 'https://zenquotes.io/api'
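# The ZenQuotes endpoints return a JSON list of quote objects (per the public
# API docs, each with "q" for the quote text and "a" for the author).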
def quote_of_the_day():  # name reconstructed; the original identifier was obfuscated
    return requests.get(API_ENDPOINT_URL + '/today').json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + '/random').json()
if __name__ == "__main__":
_A = random_quotes()
pprint.pprint(response)
| 117 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = set()
a_ = []
def parse_line(UpperCAmelCase ):
for line in fp:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(SCREAMING_SNAKE_CASE__ ) > 0:
a_ = "\n".join(SCREAMING_SNAKE_CASE__ )
# Only keep the warnings specified in `targets`
if any(F''': {x}: ''' in warning for x in targets ):
selected_warnings.add(SCREAMING_SNAKE_CASE__ )
buffer.clear()
continue
else:
a_ = line.strip()
buffer.append(SCREAMING_SNAKE_CASE__ )
if from_gh:
for filename in os.listdir(SCREAMING_SNAKE_CASE__ ):
a_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename != "warnings.txt":
continue
with open(SCREAMING_SNAKE_CASE__ ) as fp:
parse_line(SCREAMING_SNAKE_CASE__ )
else:
try:
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(SCREAMING_SNAKE_CASE__ ) as fp:
parse_line(SCREAMING_SNAKE_CASE__ )
except Exception:
logger.warning(
F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = set()
a_ = [os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
return selected_warnings
if __name__ == "__main__":
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
return values.split("," )
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCamelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCamelCase_ = extract_warnings(args.output_dir, args.targets)
UpperCamelCase_ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 243 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return x + 2
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
snake_case_ = '''x = y'''
snake_case_ = {'''y''': 5}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} )
def snake_case__( self : Dict ) ->Optional[int]:
snake_case_ = '''y = add_two(x)'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Dict ) ->str:
snake_case_ = '''x = 3\ny = 5'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
def snake_case__( self : str ) ->Tuple:
snake_case_ = '''text = f\'This is x: {x}.\''''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} )
snake_case_ = {'''x''': 8}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} )
def snake_case__( self : str ) ->str:
snake_case_ = '''test_list = [x, add_two(x)]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [3, 5] )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
def snake_case__( self : Any ) ->List[Any]:
snake_case_ = '''y = x'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} )
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 0\nfor i in range(3):\n x = i'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase )
assert result == 2
        self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
| 8 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A :
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def __lowerCAmelCase ( self ) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(SCREAMING_SNAKE_CASE ) for k, v in self.__dict__.items()} )
| 368 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
A : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
A : List[Any] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : int = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
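# Toy illustration of the post-processing step above: diffusion samples live
# in [-1, 1] and are mapped to [0, 1] (with clamping) before being turned
# into images.
sample = torch.tensor([-1.0, 0.0, 1.0, 1.5])
pixels = (sample / 2 + 0.5).clamp(0, 1)
assert pixels.tolist() == [0.0, 0.5, 1.0, 1.0]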
| 311 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase_ : Union[str, Any] = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
UpperCAmelCase_ : str = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
UpperCAmelCase_ : str = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
UpperCAmelCase_ : Union[str, Any] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
UpperCAmelCase_ : str = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase :int = k.replace(__magic_name__ , __magic_name__ )
return k
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : dict , __magic_name__ : dict ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
UpperCamelCase :Any = BigBirdPegasusConfig(**__magic_name__ )
UpperCamelCase :List[str] = BigBirdPegasusForConditionalGeneration(__magic_name__ )
UpperCamelCase :Optional[Any] = torch_model.state_dict()
UpperCamelCase :str = {}
# separating decoder weights
UpperCamelCase :Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
UpperCamelCase :Union[str, Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
UpperCamelCase :Optional[Any] = [k.endswith(__magic_name__ ) for ending in KEYS_TO_IGNORE]
if any(__magic_name__ ):
continue
UpperCamelCase :Any = DECODER_PATTERNS
UpperCamelCase :str = rename_state_dict_key(__magic_name__ , __magic_name__ )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
UpperCamelCase :List[Any] = v.T
UpperCamelCase :List[str] = torch.from_numpy(__magic_name__ )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
UpperCamelCase :int = [k.endswith(__magic_name__ ) for ending in KEYS_TO_IGNORE]
if any(__magic_name__ ):
continue
UpperCamelCase :Tuple = REMAINING_PATTERNS
UpperCamelCase :int = rename_state_dict_key(__magic_name__ , __magic_name__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
UpperCamelCase :Union[str, Any] = v.T
UpperCamelCase :Union[str, Any] = torch.from_numpy(__magic_name__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
UpperCamelCase :Dict = mapping["""model.embed_positions.weight"""]
UpperCamelCase :int = mapping.pop("""model.embed_positions.weight""" )
UpperCamelCase , UpperCamelCase :Optional[Any] = torch_model.load_state_dict(__magic_name__ , strict=__magic_name__ )
UpperCamelCase :Dict = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
UpperCamelCase :Optional[Any] = tf.train.list_variables(__magic_name__ )
UpperCamelCase :str = {}
UpperCamelCase :Optional[int] = ["""global_step"""]
for name, shape in tqdm(__magic_name__ , desc="""converting tf checkpoint to dict""" ):
UpperCamelCase :Dict = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase :Any = tf.train.load_variable(__magic_name__ , __magic_name__ )
UpperCamelCase :Tuple = array
return tf_weights
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : dict ) -> int:
"""simple docstring"""
UpperCamelCase :List[Any] = get_tf_weights_as_numpy(__magic_name__ )
UpperCamelCase :List[str] = convert_bigbird_pegasus(__magic_name__ , __magic_name__ )
torch_model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase_ : int = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
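# Illustrative check of the key-renaming convention above: applying the
# (tf_pattern, hf_pattern) pairs in order rewrites a TF variable path into
# the matching Hugging Face parameter name.
def rename_demo(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
assert rename_demo("pegasus/decoder/layer_0/kernel", demo_patterns) == "model.decoder.layers.0.weight"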
| 38 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase: Dict = logging.get_logger(__name__)
lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co./allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co./allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co./allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , )
a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) )
a : Optional[Any] = add_prefix_space
a : Optional[Any] = pre_tok_class(**__snake_case )
a : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a : Dict = 'post_processor'
a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case )
if tokenizer_component_instance:
a : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a : Any = tuple(state['sep'] )
if "cls" in state:
a : Any = tuple(state['cls'] )
a : Optional[Any] = False
if state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : Any = add_prefix_space
a : Optional[Any] = True
if state.get('trim_offsets' , __snake_case ) != trim_offsets:
a : List[Any] = trim_offsets
a : Union[str, Any] = True
if changes_to_apply:
a : int = getattr(__snake_case , state.pop('type' ) )
a : List[Any] = component_class(**__snake_case )
setattr(self.backend_tokenizer , __snake_case , __snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase_ ( self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , __snake_case : List[str] ):
a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value
a : Optional[int] = value
def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ):
a : Dict = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ):
a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ):
a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : int = [self.sep_token_id]
a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
a : Optional[Any] = super()._pad(
encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
# Load from model defaults
if return_attention_mask is None:
a : str = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a : Union[str, Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
| 297 | 0 |
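# Toy check of the LED global-attention padding rule implemented above:
# right-padding extends the mask with -1 ("local attention"), never 0.
global_mask = [1, 0, 0]  # one global token, three real tokens
padded_length = 5        # length the input_ids were padded to
padded = global_mask + [-1] * (padded_length - len(global_mask))
assert padded == [1, 0, 0, -1, -1]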
from __future__ import annotations
def snake_case( __magic_name__ = 4 ) -> list[list[int]]:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ ) or 4
return [[1 + x + y * row_size for x in range(__magic_name__ )] for y in range(__magic_name__ )]
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(__magic_name__ ) )
# OR.. transpose(reverse_column(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(__magic_name__ ) )
# OR.. reverse_column(reverse_row(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(__magic_name__ ) )
# OR.. transpose(reverse_row(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : Optional[int] = [list(__magic_name__ ) for x in zip(*__magic_name__ )]
return matrix
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : Dict = matrix[::-1]
return matrix
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : int = [x[::-1] for x in matrix]
return matrix
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
for i in matrix:
print(*__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_aaa(matrix))
| 352 |
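# Sanity check of the identity the rotation helpers above rely on: a
# 90-degree counterclockwise rotation is transpose followed by reversing the
# row order.
m = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*m)]  # [[1, 3], [2, 4]]
assert transposed[::-1] == [[2, 4], [1, 3]]  # 90 degrees counterclockwise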
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co./facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co./models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = '''maskformer'''
_UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''mask_feature_size'''}
_UpperCamelCase : Dict = ['''resnet''', '''swin''']
_UpperCamelCase : Optional[int] = ['''detr''']
def __init__( self : Any , _A : int = 256 , _A : int = 256 , _A : float = 0.1 , _A : bool = False , _A : Optional[Dict] = None , _A : Optional[Dict] = None , _A : float = 0.02 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 20.0 , _A : Optional[bool] = None , **_A : str , ) -> str:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co./microsoft/swin-base-patch4-window12-384-in22k
lowercase : List[str] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_A , _A ):
lowercase : Optional[int] = backbone_config.pop('''model_type''' )
lowercase : List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase : Union[str, Any] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co./facebook/detr-resnet-50
lowercase : Any = DetrConfig()
else:
# verify that the decoder is supported
lowercase : Union[str, Any] = (
decoder_config.pop('''model_type''' ) if isinstance(_A , _A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {','.join(self.decoders_supported )}""" )
if isinstance(_A , _A ):
lowercase : str = CONFIG_MAPPING[decoder_type]
lowercase : Dict = config_class.from_dict(_A )
lowercase : Tuple = backbone_config
lowercase : List[Any] = decoder_config
# main feature dimension for the model
lowercase : Optional[int] = fpn_feature_size
lowercase : List[Any] = mask_feature_size
# initializer
lowercase : Union[str, Any] = init_std
lowercase : Tuple = init_xavier_std
# Hungarian matcher && loss
lowercase : List[str] = cross_entropy_weight
lowercase : int = dice_weight
lowercase : List[Any] = mask_weight
lowercase : Tuple = use_auxiliary_loss
lowercase : Tuple = no_object_weight
lowercase : int = output_auxiliary_logits
lowercase : List[Any] = self.decoder_config.encoder_attention_heads
lowercase : List[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_A )
@classmethod
def __a ( cls : Optional[int] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : str ) -> Any:
"""simple docstring"""
return cls(
backbone_config=_A , decoder_config=_A , **_A , )
def __a ( self : Optional[int] ) -> Dict[str, any]:
"""simple docstring"""
lowercase : str = copy.deepcopy(self.__dict__ )
lowercase : Optional[Any] = self.backbone_config.to_dict()
lowercase : List[Any] = self.decoder_config.to_dict()
lowercase : Optional[int] = self.__class__.model_type
        return output
| 116 | 0 |
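# Hedged usage sketch for the composite config above; the classmethod name is
# taken from the transformers API and the default sub-configs are
# illustrative:
# from transformers import MaskFormerConfig, SwinConfig, DetrConfig
# config = MaskFormerConfig.from_backbone_and_decoder_configs(
#     backbone_config=SwinConfig(), decoder_config=DetrConfig()
# )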
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
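# Worked instance of the relation the helper above solves (P = V * I): with
# power and voltage known and current passed as 0, the missing quantity is
# I = P / V.
voltage, power = 2.0, 4.0
current = power / voltage
assert current == 2.0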
| 108 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A ) -> str:
_snake_case = 1
_snake_case = 2
while i * i <= n:
_snake_case = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
_snake_case = 1
_snake_case = 1
while True:
i += 1
t_num += i
if count_divisors(__A ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
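# Sanity check of the divisor-counting trick above: the divisor count is the
# product of (multiplicity + 1) over the prime factorization, e.g.
# d(28) = d(2^2 * 7) = (2 + 1) * (1 + 1) = 6.
def count_divisors_demo(n):
    divisors, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors *= multiplicity + 1
        i += 1
    if n > 1:
        divisors *= 2
    return divisors

assert count_divisors_demo(28) == 6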
| 42 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
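# The _LazyModule wiring above defers heavy imports until attribute access.
# A minimal illustration of the same idea with importlib (a sketch, not the
# transformers helper itself):
import importlib

class _LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):  # only called for attributes not found normally
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

lazy_json = _LazyAttr("json")
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'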
| 177 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , __snake_case : int , __snake_case : List[Any]=7 , __snake_case : Any=3 , __snake_case : Any=18 , __snake_case : str=30 , __snake_case : Any=4_00 , __snake_case : Optional[int]=True , __snake_case : str=None , __snake_case : Any=True , __snake_case : List[Any]=None , ):
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 20}
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
def lowerCamelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = MobileNetVaImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_center_crop''' ) )
self.assertTrue(hasattr(__snake_case , '''crop_size''' ) )
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : Tuple ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self : str ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self : int ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
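# Hedged usage sketch of the contract these tests pin down (class name as in
# transformers; sizes are the tester defaults): resize the short edge, center
# crop, and return a fixed-shape batch.
# from transformers import MobileNetV1ImageProcessor
# processor = MobileNetV1ImageProcessor(
#     size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
# )
# pixel_values = processor(images=pil_images, return_tensors="pt").pixel_values
# pixel_values.shape  # (num_images, 3, 18, 18)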
| 177 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowercase : Optional[int] = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
'''simple docstring'''
if "." in tensor_name:
A : List[str] = tensor_name.split('''.''' )
for split in splits[:-1]:
A : Tuple = getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
A : Optional[int] = new_module
A : List[str] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
A : Tuple = tensor_name in module._buffers
A : int = getattr(snake_case__ , snake_case__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
A : List[str] = False
A : Tuple = False
if is_buffer or not is_bitsandbytes_available():
A : Union[str, Any] = False
A : Optional[Any] = False
else:
A : Any = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
A : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
A : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A : Dict = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
A : Optional[Any] = value.to('''cpu''' )
if value.dtype == torch.inta:
A : Tuple = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
A : List[Any] = torch.tensor(snake_case__ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None:
A : Optional[int] = new_value.T
A : Optional[Any] = old_value.__dict__
if is_abit:
A : Optional[int] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
elif is_abit:
A : int = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ )
A : Tuple = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) )
else:
if value is None:
A : List[str] = old_value.to(snake_case__ )
elif isinstance(snake_case__ , torch.Tensor ):
A : Optional[Any] = value.to(snake_case__ )
else:
A : Tuple = torch.tensor(snake_case__ , device=snake_case__ )
if is_buffer:
A : str = new_value
else:
A : Optional[Any] = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad )
A : Any = new_value
def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
A : List[Any] = []
current_key_name.append(snake_case__ )
if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case__ , snake_case__ ):
A, A : Tuple = module.weight.shape
else:
A : int = module.in_features
A : Tuple = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A : Any = bnb.nn.LinearabitLt(
snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A : Any = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A : int = bnb.nn.Linearabit(
snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A : str = True
# Store the module class in case we need to transpose the weight later
A : Tuple = type(snake_case__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case__ )
if len(list(module.children() ) ) > 0:
A, A : Any = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ):
'''simple docstring'''
A : str = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
A, A : List[str] = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowerCAmelCase_ ( *snake_case__ , **snake_case__ ):
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def lowerCAmelCase_ ( *snake_case__ , **snake_case__ ):
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Any = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
A : Optional[Any] = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
A : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : int = sum(snake_case__ , [] )
A : Dict = len(snake_case__ ) > 0
# Check if it is a base model
A : Dict = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : List[Any] = list(model.named_children() )
A : List[str] = [list_modules[-1][0]]
# add last module together with tied weights
A : Dict = set(snake_case__ ) - set(snake_case__ )
A : Optional[Any] = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
A : Any = ['''.weight''', '''.bias''']
A : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : Dict = name.replace(snake_case__ , '''''' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
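# Hedged usage sketch of the quantized loading path these helpers back (the
# model id is illustrative; requires bitsandbytes and a CUDA device):
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# model = AutoModelForCausalLM.from_pretrained(
#     "facebook/opt-350m",
#     quantization_config=BitsAndBytesConfig(load_in_8bit=True),
# )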
| 3 |
import heapq
import sys
import numpy as np
snake_case__ : Tuple = tuple[int, int]
class A_ :
def __init__(self :Union[str, Any] )-> Union[str, Any]:
__A = []
__A = set()
def _lowerCAmelCase (self :List[str] )-> str:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _lowerCAmelCase (self :Any )-> Any:
return len(self.elements ) == 0
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] )-> Optional[int]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_UpperCamelCase )
else:
# update
# print("update", item)
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCAmelCase (self :int , _UpperCamelCase :int )-> int:
if item in self.set:
self.set.remove(_UpperCamelCase )
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCAmelCase (self :Optional[Any] )-> int:
return self.elements[0][1]
def _lowerCAmelCase (self :List[str] )-> List[Any]:
((__A) , (__A)) = heapq.heappop(self.elements )
self.set.remove(_UpperCamelCase )
return (priority, item)
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
__A = np.array(lowerCamelCase )
__A = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> List[str]:
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _a ( lowerCamelCase: TPos , lowerCamelCase: int , lowerCamelCase: TPos , lowerCamelCase: dict[TPos, float] ) -> Tuple:
'''simple docstring'''
__A = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def _a ( lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
__A = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__A = '''*'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__A = '''#'''
__A = '''-'''
__A = back_pointer[goal]
while x != start:
((__A) , (__A)) = x
# print(x)
__A = '''-'''
__A = back_pointer[x]
__A = '''-'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__A = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=''' ''' )
__A = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def _a ( lowerCamelCase: TPos ) -> Optional[Any]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _a ( lowerCamelCase: int , lowerCamelCase: Optional[int] , lowerCamelCase: Tuple , lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: str , lowerCamelCase: List[Any] , lowerCamelCase: Dict , ) -> Dict:
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__A) , (__A)) = s
__A = (x - 1, y)
__A = (x + 1, y)
__A = (x, y + 1)
__A = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__A = -1
__A = float('''inf''' )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__A = g_function[s] + 1
__A = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def _a ( ) -> str:
'''simple docstring'''
__A = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
snake_case__ : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
snake_case__ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
snake_case__ : List[str] = make_common_ground()
snake_case__ : int = blocks_blk
# hyper parameters
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = 1
snake_case__ : int = 20
snake_case__ : Optional[int] = 3 # one consistent and two other inconsistent
# start and end destination
snake_case__ : Tuple = (0, 0)
snake_case__ : Any = (n - 1, n - 1)
snake_case__ : List[Any] = 1
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos , lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__A = {start: 0, goal: float('''inf''' )}
__A = {start: -1, goal: -1}
__A = []
__A = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__A = []
__A = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A , __A = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
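# Toy evaluation of the priority used above, key(s, i) = g(s) + W1 * h_i(s),
# with the Manhattan heuristic on the 20x20 grid:
def manhattan_demo(p, goal_pos):
    return abs(p[0] - goal_pos[0]) + abs(p[1] - goal_pos[1])

g_demo = {(0, 0): 0}
assert g_demo[(0, 0)] + 1 * manhattan_demo((0, 0), (19, 19)) == 38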
| 117 | 0 |
import math
import os
import sys
def __snake_case ( _UpperCAmelCase ):
__a = ''''''
try:
with open(_UpperCAmelCase , '''rb''' ) as binary_file:
__a = binary_file.read()
for dat in data:
__a = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lexicon.pop(_UpperCAmelCase )
__a = last_match_id
if math.loga(_UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__a = '''0''' + lexicon[curr_key]
__a = bin(_UpperCAmelCase )[2:]
def __snake_case ( _UpperCAmelCase ):
__a = {'''0''': '''0''', '''1''': '''1'''}
__a , __a = '''''', ''''''
__a = len(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__a = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
index += 1
__a = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__a = lexicon[curr_string]
result += last_match_id
return result
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = os.path.getsize(_UpperCAmelCase )
__a = bin(_UpperCAmelCase )[2:]
__a = len(_UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = 8
try:
with open(_UpperCAmelCase , '''wb''' ) as opened_file:
__a = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCAmelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = read_file_binary(_UpperCAmelCase )
__a = compress_data(_UpperCAmelCase )
__a = add_file_length(_UpperCAmelCase , _UpperCAmelCase )
write_file_binary(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
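# Toy check of the bit serialization used by read_file_binary above: every
# byte is rendered as a fixed-width 8-bit string before compression.
data = bytes([5, 255])
bits = "".join(f"{byte:08b}" for byte in data)
assert bits == "0000010111111111"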
| 131 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case :Dict = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : int , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
__a = {}
__a = {}
if prompt is not None:
__a = prompt
if generate_kwargs is not None:
__a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
__a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=None):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
F'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE)} - but expected a single string. '
'''Note also that one single text can be provided for conditional image to text generation.''')
__a = self.model.config.model_type
if model_type == "git":
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE).input_ids
__a = [self.tokenizer.cls_token_id] + input_ids
__a = torch.tensor(__SCREAMING_SNAKE_CASE).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
model_inputs.update(__SCREAMING_SNAKE_CASE)
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation')
else:
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
__a = None
return model_inputs
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __SCREAMING_SNAKE_CASE)
and all(x is None for x in model_inputs['''input_ids'''])
):
__a = None
if generate_kwargs is None:
__a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__a = model_inputs.pop(self.model.main_input_name)
__a = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return model_outputs
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
for output_ids in model_outputs:
__a = {
'''generated_text''': self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE)
return records
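# Hedged usage sketch for the pipeline above (the checkpoint id is
# illustrative, not mandated by this code):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("photo.jpg", max_new_tokens=20)
# # -> [{"generated_text": "..."}]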
| 131 | 1 |
def lowerCAmelCase_ ( _lowercase : Dict) -> int:
"""simple docstring"""
for i in range(0 , _lowercase):
for _ in range(0 , n - i - 1): # printing spaces
print(""" """ , end="""""")
for _ in range(0 , i + 1): # printing stars
print("""* """ , end="""""")
print()
def lowerCAmelCase_ ( _lowercase : List[Any]) -> str:
"""simple docstring"""
for i in range(_lowercase , 0 , -1):
for _ in range(_lowercase , 0 , -1): # printing stars
print("""* """ , end="""""")
print()
for _ in range(n - i + 1 , 0 , -1): # printing spaces
print(""" """ , end="""""")
def lowerCAmelCase_ ( _lowercase : Dict) -> Optional[Any]:
"""simple docstring"""
if n <= 0:
print(""" ... .... nothing printing :(""")
return
floyd(_lowercase) # upper half
reverse_floyd(_lowercase) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
_lowercase : int =1
while K:
_lowercase : Tuple =int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
_lowercase : str =int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 170 |
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : str = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
'''simple docstring'''
return len(set(__magic_name__ ) ) in (0, 1)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
while True:
# Increment each value of a generated range
UpperCAmelCase : Any = [base + i for i in range(__magic_name__ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
UpperCAmelCase : Dict = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def lowercase ( __magic_name__ = 4 ):
'''simple docstring'''
UpperCAmelCase : int = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
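# Worked instance of the search above for run length 2: 14 = 2 * 7 and
# 15 = 3 * 5 are the first consecutive pair with two distinct prime factors
# each, so solution(2) should yield 14.
def distinct_prime_factors_demo(n):
    count, i = 0, 2
    while i * i <= n:
        if n % i == 0:
            count += 1
            while n % i == 0:
                n //= i
        i += 1
    if n > 1:
        count += 1
    return count

assert [distinct_prime_factors_demo(x) for x in (14, 15)] == [2, 2]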
| 311 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[Any] =0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : str =AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple =Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
a__ : str =Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
a__ : Optional[int] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict =Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
a__ : int =Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
a__ : Optional[int] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Union[str, Any] =CLIPConfig()
            # Create a dummy config file with image_processor_type
a__ : List[str] =Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
a__ : Optional[Any] =Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : Tuple =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop("image_processor_type" )
a__ : str =CLIPImageProcessor(**__SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
config.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ : List[str] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
a__ : List[str] =json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] =Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
a__ : Union[str, Any] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "clip-base is not a local folder and is not a valid model identifier" ):
a__ : Optional[Any] =AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase ( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a__ : Optional[int] =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision="aaaaaa" )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
a__ : Optional[int] =AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a__ : List[Any] =AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a__ : Optional[int] =AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
a__ : Optional[int] =AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ : Any =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[Any] =Path(__SCREAMING_SNAKE_CASE ) / "preprocessor_config.json"
a__ : Dict =Path(__SCREAMING_SNAKE_CASE ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__SCREAMING_SNAKE_CASE , "w" ) , )
json.dump({"model_type": "clip"} , open(__SCREAMING_SNAKE_CASE , "w" ) )
a__ : Dict =CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ : int =AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase ( self ) -> Dict:
'''simple docstring'''
        class NewImageProcessor ( CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register("custom" , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
a__ : Tuple =AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Optional[Any] =AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : str =AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
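def _example_register_and_roundtrip():
    """Hedged usage sketch, not an actual test: register the dummy CustomConfig /
    CustomImageProcessor pair (imported above), save an instance, and load it back
    through the auto class. Assumes a fresh process where "custom" is unregistered."""
    AutoConfig.register("custom" , CustomConfig )
    AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
    try:
        image_processor = CustomImageProcessor()
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded = AutoImageProcessor.from_pretrained(tmp_dir )
        assert isinstance(reloaded , CustomImageProcessor )
    finally:
        # clean up the global mappings so other code is unaffected
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
            del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]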
| 363 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co./Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig ( PretrainedConfig):
    model_type = """instructblip_vision_model"""
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **lowerCAmelCase__ , ):
        '''simple docstring'''
        super().__init__(**lowerCAmelCase__ )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **lowerCAmelCase__ ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        config_dict , lowerCAmelCase__ = cls.get_config_dict(pretrained_model_name_or_path , **lowerCAmelCase__ )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **lowerCAmelCase__ )
class InstructBlipQFormerConfig ( PretrainedConfig):
    model_type = """instructblip_qformer"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **lowerCAmelCase__ , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **lowerCAmelCase__ )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **lowerCAmelCase__ ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        config_dict , lowerCAmelCase__ = cls.get_config_dict(pretrained_model_name_or_path , **lowerCAmelCase__ )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **lowerCAmelCase__ )
class InstructBlipConfig ( PretrainedConfig):
    model_type = """instructblip"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **lowerCAmelCase__ ):
        '''simple docstring'''
        super().__init__(**lowerCAmelCase__ )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs ( cls , vision_config , qformer_config , text_config , **lowerCAmelCase__ , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
    def to_dict ( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
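# Hedged usage sketch (illustrative defaults, not a released checkpoint):
# compose the three sub-configs above into a full InstructBlipConfig via the
# classmethod; `num_query_tokens=32` mirrors the constructor default.
def _example_build_config():
    vision_config = InstructBlipVisionConfig()
    qformer_config = InstructBlipQFormerConfig()
    text_config = CONFIG_MAPPING["opt"]()
    return InstructBlipConfig.from_vision_qformer_text_configs(
        vision_config , qformer_config , text_config , num_query_tokens=3_2 )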
| 148 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
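# Minimal self-contained sketch of the deferred-import idea used above (an
# illustration only -- transformers' real _LazyModule handles many more cases):
# attribute access triggers the actual submodule import the first time.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)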
| 32 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput ):
    '''simple docstring'''
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin , ConfigMixin ):
    '''simple docstring'''
@register_to_config
    def __init__( self, num_attention_heads = 32, attention_head_dim = 64, num_layers = 20, embedding_dim = 768, num_embeddings=77, additional_embeddings=4, dropout = 0.0, time_embed_act_fn = "silu", norm_in_type = None, embedding_proj_norm_type = None, encoder_hid_proj_type = "linear", added_emb_type = "prd", time_embed_dim = None, embedding_proj_dim = None, clip_embed_dim = None, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0 )
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim, inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="""gelu""", attention_bias=True, )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""", causal_attention_mask, persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors ( self ):
        processors = {}
        def fn_recursive_add_processors(name, module, processors ):
            if hasattr(module, """set_processor""" ):
                processors[f'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''', child, processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors )
        return processors
    def set_attn_processor ( self, processor ):
        count = len(self.attn_processors.keys() )
        if isinstance(processor, dict ) and len(processor ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(name, module, processor ):
            if hasattr(module, """set_processor""" ):
                if not isinstance(processor, dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''', child, processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor )
    def set_default_attn_processor ( self ):
        self.set_attn_processor(AttnProcessor() )
    def forward ( self, hidden_states, timestep, proj_embedding, encoder_hidden_states = None, attention_mask = None, return_dict = True, ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size, -1, -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds, dim=1, )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents ( self, prior_latents ):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
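# Hedged smoke-test sketch (tiny illustrative sizes, not a trained checkpoint):
# build the prior above and run one forward pass. The shapes follow the
# constructor math: inner_dim = heads * head_dim must equal embedding_dim here,
# and the sequence length is num_embeddings + additional_embeddings.
def _example_prior_forward():
    prior = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=1, embedding_dim=8, num_embeddings=3, additional_embeddings=4, )
    batch_size = 2
    hidden_states = torch.randn(batch_size, 8 )            # noisy image embedding
    proj_embedding = torch.randn(batch_size, 8 )           # conditioning embedding
    encoder_hidden_states = torch.randn(batch_size, 3, 8 )
    out = prior(hidden_states, 1, proj_embedding, encoder_hidden_states=encoder_hidden_states )
    return out.predicted_image_embedding                   # shape (batch_size, 8)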
| 116 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model ( model , dirpath ):
    """simple docstring"""
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , '''config.json''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''config.json''' ) ):
            os.remove(os.path.join(dirpath , '''config.json''' ) )
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p , unlogit=False ):
    """Compute the entropy of a categorical distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
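# Hedged sanity check: for a uniform distribution over k outcomes the entropy
# is log(k); with k = 4 this is ~1.386.
def _example_entropy():
    p = torch.full((4,) , 0.25 )
    return entropy(p )  # tensor(1.3863)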
def print_2d_tensor ( tensor ):
    """Print a 2D tensor, one logged row per layer."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance ( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    """simple docstring"""
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_2d_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_2d_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_2d_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
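# Hedged usage sketch: the head mask consumed above is a (num_layers, num_heads)
# float tensor; zeroing entry [l, h] silences head h of layer l during the
# forward pass. Sizes below are illustrative only.
def _example_head_mask(n_layers=12 , n_heads=12 ):
    head_mask = torch.ones(n_layers , n_heads )
    head_mask[0, 0] = 0.0 # e.g. mask the first head of the first layer
    return head_mask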
def mask_heads ( args , model , eval_dataloader ):
    """simple docstring"""
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_2d_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads ( args , model , eval_dataloader , head_mask ):
    """simple docstring"""
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 279 | 1 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co./allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co./allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co./allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class LEDTokenizerFast (PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token ( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token ( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus ( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus ( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _pad ( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
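# Hedged usage sketch: LED's `global_attention_mask` marks tokens that attend
# globally (1) vs. locally (0); the `_pad` override above extends it with -1 so
# it keeps the same length as `input_ids` after padding. Values are illustrative.
def _example_global_attention(tokenizer , text ):
    encoding = tokenizer([text] )
    # put global attention on the first (<s>) token of each sequence
    encoding['''global_attention_mask'''] = [[1] + [0] * (len(ids ) - 1 ) for ids in encoding['''input_ids''']]
    return tokenizer.pad(encoding , padding='''max_length''' , max_length=16 )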
| 177 | """simple docstring"""
ROMAN = [
(1_0_0_0, "M"),
(9_0_0, "CM"),
(5_0_0, "D"),
(4_0_0, "CD"),
(1_0_0, "C"),
(9_0, "XC"),
(5_0, "L"),
(4_0, "XL"),
(1_0, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int ( __UpperCAmelCase ) -> int:
    vals = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
    total = 0
    place = 0
    while place < len(__UpperCAmelCase ):
        if (place + 1 < len(__UpperCAmelCase )) and (vals[__UpperCAmelCase[place]] < vals[__UpperCAmelCase[place + 1]]):
            total += vals[__UpperCAmelCase[place + 1]] - vals[__UpperCAmelCase[place]]
            place += 2
        else:
            total += vals[__UpperCAmelCase[place]]
            place += 1
    return total
def int_to_roman ( __UpperCAmelCase ) -> str:
    result = []
    number = __UpperCAmelCase
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
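# Hedged worked example: MMMCMXCIV decomposes as MMM (3000) + CM (900) +
# XC (90) + IV (4) = 3_994, and the two conversions round-trip.
assert roman_to_int('''MMMCMXCIV''') == 3_994
assert int_to_roman(3_994) == '''MMMCMXCIV'''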
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co./abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co./abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji (vocab_file , emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file , """r""" , encoding="""utf-8""" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as f:
        token = f.readlines()
    token = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[""",""".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
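# Hedged usage sketch (toy files only): the vocab format above is one
# comma-separated group of surface forms per line, all mapping to the same id.
def _example_load_toy_vocab():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp , """vocab.txt""" )
        emoji_path = os.path.join(tmp , """emoji.json""" )
        with open(vocab_path , """w""" , encoding="""utf-8""" ) as f:
            f.write("""hello\nhi,hey\n""" )
        with open(emoji_path , """w""" , encoding="""utf-8""" ) as f:
            json.dump({"""emoji""": {}, """emoji_inv""": {}} , f )
        # "hello" gets id 0; "hi" and "hey" share id 1
        return load_vocab_and_emoji(vocab_path , emoji_path )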
class GPTNeoXJapaneseTokenizer (PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = """""".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
        else:
            vocab_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            emoji_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(""",""".join(token ) + """\n""" )
                index += 1
        with open(emoji_file , """w""" , encoding="""utf-8""" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer (object ):
    '''simple docstring'''
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        self.content_repatter2 = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        self.content_repatter3 = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        self.content_repatter4 = re.compile(
            r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter5 = re.compile(
            r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter6 = re.compile(
            r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
        keisen = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        blocks = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        self.content_trans1 = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
    def __len__( self ):
        return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub("""<URL>""" , content )
        content = self.content_repatter2.sub("""<EMAIL>""" , content )
        content = self.content_repatter3.sub("""<TEL>""" , content )
        content = self.content_repatter4.sub("""<DATE>""" , content )
        content = self.content_repatter5.sub("""<DATE>""" , content )
        content = self.content_repatter6.sub("""<PRICE>""" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(""" """ , """<SP>""" )
        text = text.replace(""" """ , """<SP>""" )
        text = text.replace("""\r\n""" , """<BR>""" )
        text = text.replace("""\n""" , """<BR>""" )
        text = text.replace("""\r""" , """<BR>""" )
        text = text.replace("""\t""" , """<TAB>""" )
        text = text.replace("""—""" , """ー""" )
        text = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC_2A1 and c <= 0xC_2BF)
                    or (c >= 0xC_780 and c <= 0xC_783)
                    or (c >= 0xC_AB9 and c <= 0xC_BBF)
                    or (c >= 0xC_C80 and c <= 0xC_DA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE28_080 and c <= 0xE2B_07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("""<KIGOU>""" )
                elif checkuae(wd ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
        text = """""".join(words )
        return text
| 353 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation (graph: dict , v: str , visited_forward: set , visited_backward: set , cst_fwd: dict , cst_bwd: dict , queue: PriorityQueue , parent: dict , shortest_distance: float | int , ) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij (source: str , destination: str , graph_forward: dict , graph_backward: dict ) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
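    # Quick sanity check on the graphs above: the shortest E -> F path costs 3
    # (E -> G -> F), which bidirectional_dij should report.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))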
| 141 | 0 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            ins = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(ins, xx):
                    solution.insert(i, ins)
                    break
            else:
                solution.append(ins)
    # elements skipped while popping during iteration are picked up here
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
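    # Additional sanity checks (assumption: the recursive pass over the
    # leftover `arr` makes empty input and duplicates safe):
    assert strand_sort([]) == []
    assert strand_sort([1, 1, 2]) == [1, 1, 2]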
| 131 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
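# Typical usage of the processor above (a sketch; the checkpoint id is
# illustrative and fetching it requires network access):
#
#     from transformers import ChineseCLIPProcessor
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")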
| 131 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co./bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co./bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co./{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
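    # Example of what `_re_checkpoint` extracts (illustrative input string):
    assert _re_checkpoint.findall(
        "[bert-base-uncased](https://huggingface.co./bert-base-uncased)"
    ) == [("bert-base-uncased", "https://huggingface.co./bert-base-uncased")]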
| 191 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
_lowerCamelCase : Any = f"https://www.google.com/search?q={query}&num=100"
_lowerCamelCase : Optional[Any] = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
_lowerCamelCase : Union[str, Any] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
_lowerCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 191 | 1 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"{solution() = }")
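    # Known example from the problem statement: 1406357289 satisfies every
    # substring-divisibility property checked above.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))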
| 54 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
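    # Spot checks (0! is 1 by convention):
    assert factorial(0) == 1
    assert factorial(5) == 120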
| 148 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f'{solution() = }')
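    # For reference, the expected answer for the default ceiling of one
    # million is 997651, the sum of 543 consecutive primes starting at 7.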
| 183 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # shared keyword arguments forwarded to each checkpoint's pipeline
        common_kwargs = dict(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(**common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(**common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(**common_kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(**common_kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
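# Sketch of how the comparison pipeline above is meant to be driven (the
# custom_pipeline name is illustrative; actually running this downloads all
# four checkpoints):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse")
#     images = output.images  # one image per checkpoint, v1.1 through v1.4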
| 183 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> int:
'''simple docstring'''
snake_case_ : Any = True
snake_case_ : Optional[int] = OpenLlamaModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Optional[int] = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
snake_case_ : Optional[Any] = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , )
snake_case_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = OpenLlamaForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any:
'''simple docstring'''
snake_case_ : int = True
snake_case_ : str = True
snake_case_ : Tuple = OpenLlamaForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# first forward pass
snake_case_ : Tuple = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , )
snake_case_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : Tuple = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )['''hidden_states'''][0]
snake_case_ : Any = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )['''hidden_states'''][0]
# select random slice
snake_case_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 279 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 279 | 1 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
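    # Spot checks (results rounded to 10 decimal places by default):
    assert sin(0.0) == 0.0
    assert sin(90.0) == 1.0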
| 102 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ ( snake_case__ ):
_a : Optional[int] = """new-model"""
if is_tf_available():
class a__ ( snake_case__ ):
_a : Dict = NewModelConfig
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "bert-base-cased"
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "bert-base-cased"
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
@require_tensorflow_probability
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4_4_1_0)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4_4_1_0)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = copy.deepcopy(model.config )
__lowerCAmelCase = ["FunnelBaseModel"]
__lowerCAmelCase = TFAutoModel.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
__lowerCAmelCase = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
try:
AutoConfig.register("new-model" , _A )
__lowerCAmelCase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
auto_class.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase = BertModelTester(self ).get_config()
__lowerCAmelCase = NewModelConfig(**tiny_config.to_dict() )
__lowerCAmelCase = auto_class.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
__lowerCAmelCase = auto_class.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , "bert-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = TFAutoModel.from_pretrained("bert-base" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "Use `from_pt=True` to load this model" ):
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
__lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 102 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices,
    return that as the default level; otherwise fall back to the module default.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function that swallows any call."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 25 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCAmelCase = '''\
Text data.
Second line of data.'''
UpperCAmelCase = '''file'''
@pytest.fixture(scope='session')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8')
    with zstd.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), 'w') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False] )
@pytest.mark.parametrize('default_cache_dir', [True, False] )
def __UpperCamelCase ( lowercase__ : Union[str, Any], lowercase__ : Tuple, lowercase__ : int, lowercase__ : int, lowercase__ : Optional[int] ):
'''simple docstring'''
__lowercase ='custom_cache'
__lowercase ='custom_extracted_dir'
__lowercase =tmp_path / 'custom_extracted_path'
if default_extracted:
__lowercase =('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', lowercase__ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(lowercase__ ) )
__lowercase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__lowercase =xz_file
__lowercase =(
DownloadConfig(extract_compressed_file=lowercase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowercase__ )
)
__lowercase =cached_path(lowercase__, download_config=lowercase__ )
assert Path(lowercase__ ).parent.parts[-2:] == expected
def __UpperCamelCase ( lowercase__ : List[Any] ):
'''simple docstring'''
__lowercase =str(Path(lowercase__ ).resolve() )
assert cached_path(lowercase__ ) == text_file
# relative path
__lowercase =str(Path(lowercase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase__ ) == text_file
def __UpperCamelCase ( lowercase__ : Optional[Any] ):
'''simple docstring'''
__lowercase =str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
# relative path
__lowercase ='./__missing_file__.txt'
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
def __UpperCamelCase ( lowercase__ : List[str] ):
'''simple docstring'''
__lowercase =get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowercase__ ) as f:
__lowercase =f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', lowercase__ )
def __UpperCamelCase ( ):
'''simple docstring'''
with pytest.raises(lowercase__ ):
cached_path('https://huggingface.co.' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_http_offline(tmp_path_factory):
    """In offline mode, the HTTP helpers must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        http_get('https://huggingface.co.', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('https://huggingface.co.')
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_ftp_offline(tmp_path_factory):
    """In offline mode, the FTP helpers must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('ftp://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('ftp://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_fsspec_offline(tmp_path_factory):
    """In offline mode, the fsspec helpers must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('s3://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('s3://huggingface.co')
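# --- Illustrative usage (not part of the test suite above) ---
# A minimal sketch of how the `cached_path` + `DownloadConfig` pair exercised
# by these tests is used in practice; the archive path below is hypothetical.
#
#   from datasets import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#
#   config = DownloadConfig(cache_dir="./cache", extract_compressed_file=True)
#   local_path = cached_path("path/to/archive.txt.gz", download_config=config)
#   # local_path now points at the extracted file inside the cache dir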
| 141 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
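# Note: `_import_structure` maps submodule names to the public names they export.
# `_LazyModule` (installed into `sys.modules` at the bottom of this file) defers
# the heavy torch-backed submodule imports until an attribute is actually
# accessed, while the TYPE_CHECKING branch gives static analyzers real imports.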
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 153 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training samples."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
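# A quick illustration of the majority-vote step above (hypothetical labels,
# shown only as a sanity check):
#   >>> Counter([0, 0, 1]).most_common(1)
#   [(0, 2)]        # label 0 wins with 2 votes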
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 153 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
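# Typical invocation of the pipeline defined below (a sketch; the checkpoint
# name is just an example of a SAM model with a mask-generation head):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#   outputs = generator("path/to/image.png", points_per_batch=64)
#   masks = outputs["masks"]  # binary masks sorted by IoU score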
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, '''vision''')
        requires_backends(self, '''torch''')
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generates binary segmentation masks for the given image."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers=0, crop_overlap_ratio=512 / 1500, points_per_crop=32, crop_n_points_downscale_factor=1):
        image = load_image(image)
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='''pt''')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop('''input_boxes''')
        is_last = model_inputs.pop('''is_last''')
        original_sizes = model_inputs.pop('''original_sizes''').tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['''iou_scores''']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0],
            pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset)
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 191 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co./microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co./models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = '''beit'''
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
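# Example (a sketch): `BeitConfig(image_size=384)` keeps every default above
# but prepares the model for 384x384 inputs; only the arguments you pass
# override the defaults declared in `__init__`.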
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1E-4 | 191 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 2_5_6,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
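# For example (illustration only):
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}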
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 30 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # the pretraining head also expects a next-sentence label
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
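# How the multiple-choice input expansion used in the tester above works
# (shapes only, for illustration):
#   x = tf.constant([[1, 2, 3]])                  # (batch=1, seq=3)
#   tf.expand_dims(x, 1)                          # (1, 1, 3)
#   tf.tile(tf.expand_dims(x, 1), (1, 4, 1))      # (1, num_choices=4, 3)
# i.e. every choice sees a copy of the same input sequence.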
| 30 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left and right truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: above 3 digits, the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under every truncation."""
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
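# Sanity check (illustration): 3797 is a classic two-sided truncatable prime,
# so every value returned by list_truncated_nums(3797) must itself be prime:
#   assert all(is_prime(i) for i in list_truncated_nums(3797))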
| 183 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left and right truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: above 3 digits, the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under every truncation."""
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 183 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the Speech2Text (s2t) tokenizer."""
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
@slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad")
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 353 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co./facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co./models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for the DETR object-detection model."""
    model_type = '''detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
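# Example (a sketch): building a DETR config on top of a Hugging Face ResNet
# config instead of the default timm backbone:
#
#   from transformers import ResNetConfig
#
#   resnet = ResNetConfig(out_features=["stage4"])
#   config = DetrConfig.from_backbone_config(resnet, use_timm_backbone=False)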
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ])
    @property
    def atol_for_validation(self):
        return 1E-5
    @property
    def default_onnx_opset(self):
        return 12
| 213 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE : Optional[int] = """examples/"""
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
SCREAMING_SNAKE_CASE : Optional[int] = """README.md"""
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version of `fname` to `version`, using the regex template for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
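# For example (illustration), the "init" pattern rewrites the version line:
#   re_pattern, tmpl = REPLACE_PATTERNS["init"]
#   re_pattern.sub(tmpl.replace("VERSION", "4.21.0"), '__version__ = "4.21.0.dev0"\n')
#   -> '__version__ = "4.21.0"\n'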
def update_version_in_examples(version: str):
    """Update the version pinned in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co./docs/transformers/main/model_doc",
                "https://huggingface.co./docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Bump the version for a release (or a patch release) and clean the README."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump the version to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 102 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co./docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co./docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co./docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # Perplexity is the exponentiated mean per-token cross-entropy.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute Exact Match and per-question/average F1 for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None, )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 128 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional sparsification parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
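# Quick hedged smoke test of the config above (values illustrative only):
if __name__ == "__main__":
    cfg = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    print(cfg.model_type, cfg.hidden_size, cfg.pruning_method)  # masked_bert 768 topK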
| 128 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
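# Hedged note on the pattern above: at runtime the module body never imports the
# heavy submodules eagerly; _LazyModule resolves names on first attribute access,
# e.g. (assuming `transformers` and its torch extra are installed):
#   from transformers.models.speecht5 import SpeechT5Config            # lazy import triggers here
#   from transformers.models.speecht5 import SpeechT5ForTextToSpeech   # raises only if torch is missing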
| 153 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base↑↑height (Project Euler 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
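# Hedged sanity checks of the recursion above with small numbers:
assert _modexpt(3, 13, 100) == 23  # 3**13 = 1594323, last two digits 23
assert solution(base=2, height=3, digits=3) == 16  # 2↑↑3 = 2**(2**2) = 16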
| 153 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed edge of a graph with 0/1 edge weights."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: a deque-based Dijkstra specialization for 0/1 edge weights."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # Weight-0 edges go to the front of the deque, weight-1 edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
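# Hedged usage sketch of the 0-1 BFS above: because weight-0 edges are explored
# first, vertices leave the deque in nondecreasing distance order.
_g = AdjacencyList(3)
_g.add_edge(0, 1, 0)
_g.add_edge(1, 2, 1)
assert _g.get_shortest_path(0, 2) == 1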
| 336 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, reusing the previous row's bound."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
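# Hedged sanity check tying the three counters together (LeetCode-style grid
# with 8 negative numbers):
_small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(_small)
    == count_negatives_brute_force(_small)
    == count_negatives_brute_force_with_break(_small)
    == 8
)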
| 336 | 1 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` appears in `document` (case-insensitive)."""
    # Strip all punctuation and newlines, then tokenize on spaces.
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
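# Hedged worked example tying the pieces together (toy corpus): "to" appears
# twice in the first document and in 2 of the 3 documents overall, so
# idf = log10(3/2) ≈ 0.176 and tf-idf = 2 * 0.176 ≈ 0.352.
_doc = "To be, or not to be"
_corpus = "To be, or not to be\nAnother day\nTo think"
_tf = term_frequency("to", _doc)  # 2
_df, _n = document_frequency("to", _corpus)  # (2, 3)
print(tf_idf(_tf, inverse_document_frequency(_df, _n)))  # 0.352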
| 30 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an UperNet model."""

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
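# Quick hedged smoke test of the config above (pulls in the default ResNet backbone):
if __name__ == "__main__":
    cfg = UperNetConfig()
    print(cfg.backbone_config.model_type)  # "resnet" (default backbone)
    print(cfg.to_dict()["model_type"])  # "upernet"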
| 30 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co./datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
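# Hedged note on `strength` in the img2img calls above: diffusers img2img
# schedulers typically keep only the last int(num_inference_steps * strength)
# steps, so strength=0.2 with 100 steps denoises for roughly 20 steps:
_num_inference_steps, _strength = 100, 0.2
print(min(int(_num_inference_steps * _strength), _num_inference_steps))  # 20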
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. If size has the key "shortest_edge", the shortest edge is resized to
        int(256 / 224 * shortest_edge), keeping the aspect ratio.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
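# Minimal hedged usage sketch of the processor above (shapes illustrative): a
# 300x400 image is resized so its shortest edge becomes int(256/224 * 224) = 256,
# then center-cropped to 224x224.
if __name__ == "__main__":
    dummy = np.zeros((300, 400, 3), dtype=np.uint8)
    processor = LevitImageProcessor()
    batch = processor.preprocess(dummy)
    print(np.array(batch["pixel_values"]).shape)  # expected (1, 3, 224, 224)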
| 345 | 0 |