from itertools import count


def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length n for which the fill-count function (number of
    ways to place blocks of at least min_block_length into a row of length n)
    first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number with exactly two,
    not necessarily distinct, prime factors (semiprimes).
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of linear equations
        a1*x + b1*y = c1 and a2*x + b2*y = c2,
    given as [a1, b1, c1] and [a2, b2, c2], using Cramer's rule.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
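

# Minimal usage sketch, assuming the cramers_rule_2x2 signature defined above:
# the system x + 2y = 10, 4x + 5y = 6 has the unique solution (-38/3, 34/3).
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 10], [4, 5, 6]))  # (-12.666666666666666, 11.333333333333334)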
"""simple docstring"""
lowercase_ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355818,
}
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] ) -> str:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__a = (
f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
f'''Valid values are: {', '.join(_UpperCamelCase )}'''
)
raise ValueError(_UpperCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
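
    # Minimal usage sketch, assuming the conversion table above:
    # 3,600,000 J is exactly one kilowatt-hour, and a watt-second is a joule.
    print(energy_conversion("joule", "kilowatthour", 3_600_000))  # 1.0
    print(energy_conversion("wattsecond", "joule", 1))  # 1.0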
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526

primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
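
    # Minimal usage sketch, assuming the DiffieHellman API defined above:
    # two parties exchanging public keys derive the same shared secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b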
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` summing to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down with a dp array."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
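
    # Minimal usage sketch, assuming the three implementations above agree:
    # for array=[1, 2, 5] and target=5 there are 9 ordered combinations.
    print(combination_sum_iv_dp_array(n, array, target))  # 9
    print(combination_sum_iv_bottom_up(n, array, target))  # 9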
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
import math
import unittest


def is_prime(number: int) -> bool:
    """Check primality of a non-negative int in O(sqrt(n))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            images.append(
                self.renderer.decode(
                    latent[None, :],
                    device,
                    size=frame_size,
                    ray_batch_size=4096,
                    n_coarse_samples=64,
                    n_fine_samples=128,
                )
            )

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
"""simple docstring"""
import numpy as np
from PIL import Image
def _A ( _a : np.ndarray , _a : int , _a : int ):
"""simple docstring"""
A = np.array(_a )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A = 0
A = 0
A = 0
A = 0
# compute the shape of the output matrix
A = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A = 0
A = 0
return updated_arr
def _A ( _a : np.ndarray , _a : int , _a : int ):
"""simple docstring"""
A = np.array(_a )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
A = 0
A = 0
A = 0
A = 0
# compute the shape of the output matrix
A = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A = 0
A = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
UpperCAmelCase =Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return gray image from rgb image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return binary image from gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image with the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 394 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: one point for every position that already matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
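# Worked example (illustrative, not from the original file): with
# parent_1 = "abcdef", parent_2 = "uvwxyz" and random_slice = 2, crossover
# returns ("abwxyz", "uvcdef"): each child keeps one parent's prefix and the
# other parent's suffix.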
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a single random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Crossover and mutate new children from a fit parent."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for `target` is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 394 | 1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that consumes each edge once and records the trail."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
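# Worked example (illustrative, not from the original file): on the triangle
# graph {1: [2, 3], 2: [1, 3], 3: [1, 2]}, dfs(1, graph, visited_edge) uses
# every edge exactly once and returns the closed trail [1, 2, 3, 1].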
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
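# Return codes above, summarised: 1 = no odd-degree vertices, so an Euler
# circuit exists; 2 = exactly two odd-degree vertices, so an Euler path exists
# (odd_node is a valid start); 3 = anything else, so the graph is not Eulerian.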
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 415 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-TensorFlow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 121 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP/MXNet) into a PyTorch checkpoint."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
__lowercase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowercase : int =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 550 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ =[["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase_ =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowerCAmelCase )
self.assertListEqual(encoding.boxes , _lowerCAmelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 550 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 103 | '''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2

    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels

    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
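# What the mask looks like (illustrative values, not from the original file):
# with size=(8, 8) and overlap_pixels=2 the result is an 8x8 uint8 array whose
# 4x4 interior is 255 and whose 2-pixel border ramps linearly down to 0, so
# neighbouring tiles blend smoothly; sides listed in remove_borders stay fully
# opaque instead of ramping.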
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
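# Worked example (illustrative): next_divisible(300, 128) computes
# 300 % 128 == 44 and returns 300 - 44 == 256, i.e. n rounded down to the
# nearest multiple of d.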
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 309 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 293 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
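# Why this equals the Shannon entropy of softmax(x): with A = sum_i exp(x_i) and
# p_i = exp(x_i) / A, H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log A) = log A - B / A,
# where B = sum_i x_i * exp(x_i). A quick standalone numeric check:
#
#   x = torch.randn(2, 5)
#   p = torch.softmax(x, dim=1)
#   assert torch.allclose(-(p * torch.log(p)).sum(dim=1), entropy(x), atol=1e-5)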
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Accept either a single threshold for all layers or a per-layer list.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, pooled_output, entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from a non-final BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
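# A minimal inference sketch with entropy-based early exit (illustrative only: it assumes a
# fine-tuned DeeBERT checkpoint; an untrained model will not exit early in any meaningful way):
#
#   config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#   model = DeeBertForSequenceClassification(config).eval()
#   model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a highway head is confident enough
#   outputs = model(input_ids)
#   logits = outputs[0]
#   (original_entropy, highway_entropies), exit_layer = outputs[-2], outputs[-1]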
| 293 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
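# The tests below exercise the audio-classification pipeline end to end. For reference, a
# minimal standalone usage sketch (the checkpoint and file name here are illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
#   preds = classifier("speech.wav", top_k=3)  # also accepts a raw numpy array or
#                                              # a {"array": ..., "sampling_rate": ...} dict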
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 557 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
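# A sketch of the expected invocation, with illustrative paths:
#
#   convert_tf_checkpoint_to_pytorch(
#       tf_checkpoint_path="./bert_model.ckpt",
#       bert_config_file="./bert_config.json",
#       pytorch_dump_path="./pytorch_model.bin",
#   )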
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 557 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2_048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated-GELU feed-forward: GELU(W0 x) elementwise-times (W1 x)
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU, as used in the original Google BERT repo."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: projects the conditioning to a per-channel (scale, shift) pair."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
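# FiLM (feature-wise linear modulation, Perez et al. 2018) in isolation — a standalone
# shape sketch; the dimensions below are illustrative:
#
#   film = T5FiLMLayer(in_features=128, out_features=32)
#   x = torch.randn(2, 16, 32)
#   cond = torch.randn(2, 1, 128)
#   film(x, cond).shape  # torch.Size([2, 16, 32]); every channel gets its own scale and shift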
| 505 |
"""simple docstring"""
# Number of characters in the alphabet, which we use as the base of the hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash over a sliding window."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False
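# Worked check of the rolling-hash update used above: drop the leading character,
# shift by the base, then append the trailing character.
_h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
_h_bc = (ord("b") * alphabet_size + ord("c")) % modulus
assert _h_bc == ((_h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus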
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
test_rabin_karp()
| 505 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
A__ = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
A__ = """▁"""
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
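# Quick usage sketch (assumes the public "albert-base-v2" checkpoint; left commented out
# because loading it requires network access):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   print(tokenizer.tokenize("A quick test."))      # SentencePiece pieces such as ['▁a', '▁quick', ...]
#   print(tokenizer("A quick test.")["input_ids"])  # ids wrapped in [CLS] ... [SEP]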
| 166 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters already in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 268 | 0 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # interior sample points a + h, a + 2h, ..., b - h
    x = a + h
    while x <= b - h + 1e-12:  # small tolerance so float drift doesn't drop the last point
        yield x
        x += h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
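    # Convergence sketch: the composite trapezoidal rule has O(h^2) error, so doubling
    # the step count should cut the error for f(x) = x^2 on [0, 1] roughly 4x.
    for n_steps in (10.0, 20.0, 40.0):
        print(n_steps, abs(method_1([0.0, 1.0], n_steps) - 1.0 / 3.0))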
| 335 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
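# Usage sketch: build a config around a backbone given as a plain dict, then round-trip it
# (the Swin backbone choice is illustrative):
#
#   config = UperNetConfig(backbone_config={"model_type": "swin"})
#   d = config.to_dict()
#   assert d["model_type"] == "upernet" and d["backbone_config"]["model_type"] == "swin"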
| 335 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 611 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 3 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
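# Usage sketch (the checkpoint name is illustrative; any DDPM-style UNet + scheduler pair works):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")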
| 588 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
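# Design note: functools.cache memoizes min_distance on (index1, index2), so the top-down
# recursion visits each state at most once — O(len(word1) * len(word2)) time and space,
# the same complexity as the classic bottom-up edit-distance table.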
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 | 1 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    # phi[i] holds Euler's totient of i once the sieve below has run.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
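# Sanity check on a small limit: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, whose sum (21) counts the
# reduced proper fractions n/d with d <= 8.
assert solution(8) == 21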
if __name__ == "__main__":
print(solution())
| 107 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
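# Usage sketch: request backbone features from specific stages (the stage choice is illustrative):
#
#   config = FocalNetConfig(out_features=["stage1", "stage3"])
#   config.out_features, config.out_indices  # ['stage1', 'stage3'] and the matching indices;
#                                            # exact container types vary by transformers version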
| 357 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
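
# Effect of the lazy pattern above: a statement such as
#   from transformers import CanineModel, CanineTokenizer
# only triggers the import of `modeling_canine` when the attribute is first
# accessed, which keeps the top-level `import transformers` cheap.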
| 713 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
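    # Illustrative invocation (script name and paths are examples only):
    #   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16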
| 117 |
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
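

if __name__ == "__main__":
    # Illustrative first-order moving-average filter y[n] = 0.5*x[n] + 0.5*x[n-1];
    # the coefficients are chosen purely for demonstration.
    demo_filter = IIRFilter(1)
    demo_filter.set_coefficients([1.0, 0.0], [0.5, 0.5])
    print([demo_filter.process(x) for x in (1.0, 1.0, 0.0)])  # [0.5, 1.0, 0.5]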
| 117 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
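
# Minimal usage sketch (as the tokenizer would be used from the library): ByT5
# operates directly on UTF-8 bytes, so a character's id is its byte value offset
# by the 3 special tokens.
#   from transformers import ByT5Tokenizer
#   tokenizer = ByT5Tokenizer()
#   tokenizer("hi")["input_ids"]  # [107, 108, 1] -> ord('h') + 3, ord('i') + 3, </s>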
| 707 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_1_2,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
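    # Illustrative invocation (file names are examples only):
    #   python convert_original_controlnet_to_diffusers.py --checkpoint_path control_sd15_canny.pth \
    #       --original_config_file cldm_v15.yaml --dump_path ./controlnet-canny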
| 450 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
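

# With both qubits deterministically flipped to |1> before measurement, every
# shot lands in state '11', so the call below should report counts == {'11': 1000}.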
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""") | 448 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 618 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
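

# For reference, the backend-key naming scheme exercised above: a guard such as
# "    if not (is_flax_available() and is_transformers_available()):" maps to the
# key "flax_and_transformers" (an illustrative case, not asserted in this file).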
| 437 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
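
# Minimal usage sketch (as the processor would be used from the library): for
# shortest_edge < 384 the image is resized by 1/crop_pct and then center-cropped.
#   from transformers import ConvNextImageProcessor
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#   batch = processor(np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224)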
| 437 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu")
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superres_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32)
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 490 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
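
# Minimal usage sketch (checkpoint name and variables are examples only):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   transcription = processor.batch_decode(predicted_ids)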
| 490 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 145 |
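
# A minimal usage sketch (not from the original test file) of the inference flow the
# tests above exercise: run BEiT semantic segmentation on a single image and map the
# logits back to per-pixel class ids at the input resolution. The checkpoint name
# comes from the tests; the input image is an assumption (any RGB PIL image works).
import torch
from PIL import Image
from transformers import BeitImageProcessor, BeitForSemanticSegmentation

def segment(image: Image.Image) -> torch.Tensor:
    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
    model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # logits have shape (batch, 150 ADE20K classes, h/4, w/4); upsample back to input size
    seg = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])
    return seg[0]  # (height, width) tensor of class ids
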
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and massage its state dict into OPT format."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy the metaseq weights into the Hugging Face OPT structure and save them."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 145 | 1 |
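
# A self-contained sketch (not part of the original script) of the core step in
# load_checkpoint above: splitting a fused qkv_proj weight into three separate
# projections along the output dimension. The toy hidden size is made up; the
# final assert checks that the split is lossless.
import torch

hidden = 8
fused = torch.randn(3 * hidden, hidden)  # stands in for a qkv_proj.weight tensor
depth = fused.shape[0]
assert depth % 3 == 0
first, second, third = torch.split(fused, depth // 3, dim=0)
assert first.shape == second.shape == third.shape == (hidden, hidden)
assert torch.equal(torch.cat([first, second, third], dim=0), fused)  # nothing lost
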
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
NUM_SHARDS = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=True ):
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : Tuple = os.path.join(lowerCamelCase_ , 'tmp' )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : str = read_json(os.path.join(lowerCamelCase_ , 'params.json' ) )
__a : str = NUM_SHARDS[model_size]
__a : List[str] = params['n_layers']
__a : Optional[Any] = params['n_heads']
__a : Optional[Any] = n_heads // num_shards
__a : Optional[int] = params['dim']
__a : Union[str, Any] = dim // n_heads
__a : Dict = 10000.0
__a : Tuple = 1.0 / (base ** (torch.arange(0 , lowerCamelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__a : Tuple = params['n_kv_heads'] # for GQA / MQA
__a : Union[str, Any] = n_heads_per_shard // num_key_value_heads
__a : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
__a : List[str] = n_heads
__a : List[Any] = n_heads_per_shard
__a : List[str] = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__a : Union[str, Any] = torch.load(os.path.join(lowerCamelCase_ , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
__a : Any = [
torch.load(os.path.join(lowerCamelCase_ , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(lowerCamelCase_ )
]
__a : Any = 0
__a : List[str] = {'weight_map': {}}
for layer_i in range(lowerCamelCase_ ):
__a : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Dict = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, so saving attention_norm and ffn_norm would save those other weights too, which
            # is redundant since they will be stitched from multiple shards. To avoid that, they are cloned.
__a : Optional[int] = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__a : List[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) )
__a : List[str] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
__a : List[Any] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ )
__a : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : str = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : Union[str, Any] = inv_freq
for k, v in state_dict.items():
__a : Any = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
__a : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Tuple = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
__a : Optional[int] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(lowerCamelCase_ )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(lowerCamelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
__a : List[Any] = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
# Write configs
__a : Any = {'total_size': param_count * 2}
write_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'pytorch_model.bin.index.json' ) )
__a : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
__a : Tuple = params['multiple_of'] if 'multiple_of' in params else 2_5_6
__a : Any = LlamaConfig(
hidden_size=lowerCamelCase_ , intermediate_size=compute_intermediate_size(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=lowerCamelCase_ , )
config.save_pretrained(lowerCamelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
__a : List[str] = LlamaForCausalLM.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCamelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(lowerCamelCase_ , safe_serialization=lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=lowerCamelCase_ , help='Whether or not to save using `safetensors`.' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 47 |
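
# A quick sanity check (not part of the original script) of the intermediate-size
# arithmetic used above: with the default ffn_dim_multiplier=1 and multiple_of=256
# it reproduces the per-model entries in the size table at the top of the file.
# The helper is restated here so the check is self-contained.
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert compute_intermediate_size(4096) == 11008   # 7B  (dim=4096)
assert compute_intermediate_size(5120) == 13824   # 13B (dim=5120)
assert compute_intermediate_size(8192) == 22016   # 65B (dim=8192)
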
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 36 | 0 |
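
# A small demonstration (not part of the original script) of the ordered find/replace
# renaming performed by rename_state_dict_key above. PATTERNS here is a trimmed copy
# of the list at the top of the file, just enough to rename the illustrative,
# made-up key below.
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["output_proj", "out_proj"],
    ["kernel", "weight"],
]

def rename_state_dict_key(k):
    for tf_name, hf_name in PATTERNS:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_state_dict_key("decoder/memory_attention/output_proj/kernel"))
# -> decoder.encoder_attn.out_proj.weight
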
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __a , __a , __a="replace" , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=False , **__a , ) -> str:
'''simple docstring'''
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else bos_token
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else sep_token
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else cls_token
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''') as vocab_handle:
_UpperCamelCase = json.load(UpperCamelCase_)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = errors # how to handle errors in decoding
_UpperCamelCase = bytes_to_unicode()
_UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''') as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''')[1:-1]
_UpperCamelCase = [tuple(merge.split()) for merge in bpe_merges]
_UpperCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_))))
_UpperCamelCase = {}
_UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return len(self.encoder)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_UpperCamelCase = tuple(UpperCamelCase_)
_UpperCamelCase = get_pairs(UpperCamelCase_)
if not pairs:
return token
while True:
_UpperCamelCase = min(UpperCamelCase_ , key=lambda __a: self.bpe_ranks.get(UpperCamelCase_ , float('''inf''')))
if bigram not in self.bpe_ranks:
break
_UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(UpperCamelCase_):
try:
_UpperCamelCase = word.index(UpperCamelCase_ , UpperCamelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCamelCase = j
if word[i] == first and i < len(UpperCamelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCamelCase = tuple(UpperCamelCase_)
_UpperCamelCase = new_word
if len(UpperCamelCase_) == 1:
break
else:
_UpperCamelCase = get_pairs(UpperCamelCase_)
_UpperCamelCase = " ".join(UpperCamelCase_)
_UpperCamelCase = word
return word
def UpperCAmelCase ( self , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = []
for token in re.findall(self.pat , UpperCamelCase_):
_UpperCamelCase = "".join(
self.byte_encoder[b] for b in token.encode('''utf-8''')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_).split(''' '''))
return bpe_tokens
def UpperCAmelCase ( self , __a) -> List[Any]:
'''simple docstring'''
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self , __a) -> int:
'''simple docstring'''
return self.decoder.get(UpperCamelCase_)
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = "".join(UpperCamelCase_)
_UpperCamelCase = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
return text
def UpperCAmelCase ( self , __a , __a = None) -> Tuple:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
_UpperCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_) + '''\n''')
_UpperCamelCase = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
writer.write('''#version: 0.2\n''')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''')
_UpperCamelCase = token_index
writer.write(''' '''.join(UpperCamelCase_) + '''\n''')
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self , __a , __a = None) -> Optional[Any]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , __a , __a = None , __a = False) -> Tuple:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_)) + [1]
return [1] + ([0] * len(UpperCamelCase_)) + [1, 1] + ([0] * len(UpperCamelCase_)) + [1]
def UpperCAmelCase ( self , __a , __a = None) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCAmelCase ( self , __a , __a=False , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_) > 0 and not text[0].isspace()):
_UpperCamelCase = " " + text
return (text, kwargs)
| 710 |
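
# A tiny standalone run (not part of the original file) of the get_pairs helper the
# BPE loop above builds on: it enumerates the adjacent symbol pairs that are
# candidates for the next merge. The word "hello" is an arbitrary example.
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(tuple("hello")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}  (set order may vary)
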
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm over an adjacency list; returns the list of MST edges."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 78 | 0 |
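
# An equivalent, compact Prim's implementation (not part of the original file) using
# the standard-library heapq, handy for cross-checking the custom-heap version above
# on a small graph. The 4-vertex graph and its weights are made up for illustration.
import heapq
from collections import defaultdict

def prim(adjacency, start=0):
    visited = {start}
    edges = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(edges)
    mst = []
    while edges:
        w, u, v = heapq.heappop(edges)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v))
        for nxt, nw in adjacency[v]:
            if nxt not in visited:
                heapq.heappush(edges, (nw, v, nxt))
    return mst

graph = defaultdict(list)
for a, b, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)]:
    graph[a].append((b, w))
    graph[b].append((a, w))
print(prim(graph))  # [(0, 1), (1, 2), (2, 3)]
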
import operator as op
UpperCAmelCase_ = """scaler.pt"""
UpperCAmelCase_ = """pytorch_model"""
UpperCAmelCase_ = """random_states"""
UpperCAmelCase_ = """optimizer"""
UpperCAmelCase_ = """scheduler"""
UpperCAmelCase_ = """pytorch_model.bin"""
UpperCAmelCase_ = """pytorch_model.bin.index.json"""
UpperCAmelCase_ = """model.safetensors"""
UpperCAmelCase_ = """model.safetensors.index.json"""
UpperCAmelCase_ = """1.10.2"""
UpperCAmelCase_ = """py38"""
UpperCAmelCase_ = """4.17.0"""
UpperCAmelCase_ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
UpperCAmelCase_ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
UpperCAmelCase_ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
UpperCAmelCase_ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
UpperCAmelCase_ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
UpperCAmelCase_ = """2.0.1"""
UpperCAmelCase_ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
UpperCAmelCase_ = ["""default""", """reduce-overhead""", """max-autotune"""]
UpperCAmelCase_ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase_ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
UpperCAmelCase_ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
UpperCAmelCase_ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 458 |
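
# A sketch (not part of the original constants module) of how the string-to-operator
# map defined above is typically used: pair a string operator with packaging's
# Version type to gate features on an installed library version. The helper name
# compare_versions is an assumption, mirroring the usual accelerate-style helper.
import operator as op
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(version, operation, requirement):
    return STR_OPERATION_TO_FUNC[operation](parse(version), parse(requirement))

print(compare_versions("2.0.1", ">=", "1.10.2"))  # True
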
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 458 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline using models like CLIP: for each image it
    predicts the most likely label among the `candidate_labels` you provide.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 712 |
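
# A usage sketch (not part of the original file) for the pipeline defined above, via
# the public transformers factory. The CLIP checkpoint and the COCO test image are
# common public choices, not mandated by the code.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(preds[0])  # highest-scoring {'score': ..., 'label': ...} dict
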
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a_ :
_snake_case = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_snake_case = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels )
__snake_case : Dict[int, str] = dict(enumerate(A ) )
__snake_case : Optional[Any] = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
__snake_case : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
__snake_case : List[Any] = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__snake_case : int = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]:
__snake_case : str = np.argmax(A , axis=2 )
__snake_case ,__snake_case : int = preds.shape
__snake_case : Dict = [[] for _ in range(A )]
__snake_case : Union[str, Any] = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A : EvalPrediction ) -> Dict:
__snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
__snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__snake_case : Optional[Any] = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : List[str] = trainer.evaluate()
__snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
__snake_case : str = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__snake_case ,__snake_case ,__snake_case : str = trainer.predict(A )
__snake_case ,__snake_case : List[str] = align_predictions(A , A )
__snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , A , A )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 61 | 0 |
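
# A toy standalone run (not part of the original script) of the align_predictions
# logic above: argmax over the label dimension, then drop positions whose gold label
# is the CrossEntropyLoss ignore_index (-100). Shapes and labels are made up.
import numpy as np

label_map = {0: "O", 1: "B-PER"}
IGNORE = -100  # nn.CrossEntropyLoss().ignore_index

def align_predictions(predictions, label_ids):
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    out_label_list = [[] for _ in range(batch_size)]
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != IGNORE:
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list

logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
gold = np.array([[0, 1, IGNORE]])                           # last position is a masked sub-word
print(align_predictions(logits, gold))  # ([['O', 'B-PER']], [['O', 'B-PER']])
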
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 593 |
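
# The deduplication under test is MinHash-based. A hedged sketch (not part of the
# original test file) of the core similarity estimate with the datasketch library,
# which scripts like this typically build on; num_perm and the whitespace shingling
# are illustrative choices, not taken from the module under test.
from datasketch import MinHash

def minhash(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf8"))
    return m

a = minhash("a " * 20)
b = minhash("a " * 30)
print(a.jaccard(b))  # ~1.0: identical token sets, so the two docs are near-duplicates
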
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a DETR model.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 593 | 1 |
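
# A short usage sketch (not part of the original file) for the config class above,
# via the public transformers API. The attribute assertions rely on the aliasing
# that PretrainedConfig performs through attribute_map.
from transformers import DetrConfig

config = DetrConfig(num_queries=50)
assert config.hidden_size == config.d_model  # "hidden_size" is an alias for d_model
assert config.to_dict()["num_queries"] == 50
print(config.model_type)  # detr
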
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
# Class name restored from the EncodecFeatureExtractor + T5 tokenizer pairing
# (this is MusicGen's processor).
class MusicgenProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if "padding_mask" in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
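# Usage sketch (illustrative; the checkpoint name is an assumption, and running
# it downloads weights): one processor call prepares both modalities, and
# `batch_decode` strips padding from generated audio using the padding mask.
#
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop with a groovy bassline"], padding=True, return_tensors="pt")
#   # ... generate `audio_values` with a model, then:
#   audio = processor.batch_decode(audio_values, padding_mask=inputs["padding_mask"])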
| 679 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_vision_model"
    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1E-6, attention_dropout=0.0, initializer_range=1E-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_qformer"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision encoder, so its encoder width must match.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
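# Quick sanity check (illustrative sketch; uses only default sub-configs, no
# pretrained weights — the text config falls back to OPT):
if __name__ == "__main__":
    config = InstructBlipConfig()
    as_dict = config.to_dict()
    assert as_dict["model_type"] == "instructblip"
    assert as_dict["qformer_config"]["encoder_hidden_size"] == config.vision_config.hidden_size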
| 679 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """simple docstring"""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """simple docstring"""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"""Received an invalid text input, got - {type(prompt)} - but expected a single string. """
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"""Model type {model_type} does not support conditional text generation""")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 397 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
# Name restored: this dataset is instantiated below as `RandomIterableDataset`.
class RandomIterableDataset(IterableDataset):
    """simple docstring"""

    def __init__(self, p_stop=0.01, max_length=1000):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
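# Quick sanity check (illustrative, added alongside the tests): with a fixed
# seed the random-length dataset is reproducible, which is what the shard
# checker below relies on.
if __name__ == "__main__":
    random.seed(42)
    first = list(RandomIterableDataset(p_stop=0.5, max_length=10))
    random.seed(42)
    second = list(RandomIterableDataset(p_stop=0.5, max_length=10))
    assert first == second  # same seed -> identical (random-length) sequence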
class DataLoaderTester(unittest.TestCase):
    """simple docstring"""

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
lowercase__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase__ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
lowercase__ : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase__ : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
lowercase__ : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase__ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
lowercase__ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
# Check the shards when the dataset is very small.
lowercase__ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : List[str] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
lowercase__ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
lowercase__ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase__ : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
lowercase__ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase__ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
lowercase__ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
# Check the shards when the dataset is very small.
lowercase__ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : int = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
lowercase__ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase )
def __a ( self ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase__ : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase__ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is very small.
lowercase__ : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase )
lowercase__ : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , even_batches=lowerCamelCase )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase__ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase__ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
# Check the shards when the dataset is very small.
lowercase__ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
lowercase__ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase )
lowercase__ : Dict = [[], []]
self.check_batch_sampler_shards(lowerCamelCase , lowerCamelCase , split_batches=lowerCamelCase , even_batches=lowerCamelCase )
    def test_batch_sampler_with_varying_batch_size(self):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        """simple docstring"""
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        """simple docstring"""
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 397 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        # int32 restored here; the garbled source elided the exact integer width.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        # Yield one model input per candidate label; `is_last` marks the chunk boundary.
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
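# Usage sketch (illustrative; the checkpoint name is an assumption and loading
# it downloads weights):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#            candidate_labels=["cat", "remote control"])
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]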
| 171 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCAmelCase ( self : Optional[Any] ) ->int:
lowerCamelCase_ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ : List[str] = text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
lowerCamelCase_ : List[str] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
lowerCamelCase_ : Any = text_generator("""This is a test""" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
] , )
lowerCamelCase_ : List[Any] = text_generator.model.config.eos_token_id
lowerCamelCase_ : Tuple = """<pad>"""
lowerCamelCase_ : Union[str, Any] = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
] , )
@require_tf
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
lowerCamelCase_ : int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ : Dict = text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
lowerCamelCase_ : str = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__a )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("""text-generation""", model="""hf-internal-testing/tiny-random-gpt2""")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}], )
        output = text_generator(prompt, stop_sequence=""" fe""")
        self.assertEqual(output, [{"""generated_text""": """Hello I believe in fe"""}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
lowerCamelCase_ : Optional[Any] = text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCamelCase_ : Any = text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCamelCase_ : Optional[Any] = pipeline(task="""text-generation""" , model=__a , tokenizer=__a , return_full_text=__a )
lowerCamelCase_ : Any = text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCamelCase_ : Any = text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCamelCase_ : List[str] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ : List[str] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
with self.assertRaises(__a ):
lowerCamelCase_ : Dict = text_generator("""test""" , return_full_text=__a , return_text=__a )
with self.assertRaises(__a ):
lowerCamelCase_ : Union[str, Any] = text_generator("""test""" , return_full_text=__a , return_tensors=__a )
with self.assertRaises(__a ):
lowerCamelCase_ : Optional[Any] = text_generator("""test""" , return_text=__a , return_tensors=__a )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ : str = text_generator("""""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ : Union[str, Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
lowerCamelCase_ : Optional[int] = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__a ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
import torch
# Classic `model_kwargs`
lowerCamelCase_ : Any = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ : int = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCamelCase_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ : str = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ : List[str] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCamelCase_ : str = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _lowerCAmelCase ( self : Dict ) ->Optional[Any]:
import torch
lowerCamelCase_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self : List[str] ) ->Optional[int]:
import torch
lowerCamelCase_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__a , top_p=0.5 )
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("""text-generation""", model="""hf-internal-testing/tiny-random-gpt2""")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("""transformers.generation.tf_utils""")
        else:
            logger = logging.get_logger("""transformers.generation.utils""")
        logger_msg = """Both `max_new_tokens`"""  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 171 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    # float32 restored here; the obfuscated source elided the exact width
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
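# For reference (added note): each kernel entry follows the real Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel coordinates rotated by `theta` degrees; e.g.
# gabor_filter_kernel(10, 8, 0, 10, 0, 0) returns an 11x11 float32 kernel.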
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 42 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    # the loaded file is already a state dict here
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
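# Loading sketch (illustrative, added note): after conversion, the checkpoints
# can be read back with diffusers, e.g.
#
#   from diffusers import UNet1DModel
#   net = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")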
| 42 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs: List):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(F'''Invalid output: {output}''')
    return output_types
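# Round-trip sketch (illustrative, added note): over the supported modalities
# the two helpers are inverses, e.g.
#
#   assert output_types(create_inputs(["text", "audio"])) == ["text", "audio"]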
@is_tool_test
class ToolTesterMixin:
    '''simple docstring'''

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 708 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """simple docstring"""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """simple docstring"""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """simple docstring"""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block
        card_data=ModelCardData(language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
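# For example (a sketch with a placeholder path): a resolved cache file such as
#   "~/.cache/huggingface/hub/models--foo--bar/snapshots/<40-char-sha>/model.bin"
# yields "<40-char-sha>", provided the captured group matches REGEX_COMMIT_HASH.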
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
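# For example (a sketch of the expected behavior):
#   _add_variant("diffusion_pytorch_model.bin", "fp16")  -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin", None)    -> "diffusion_pytorch_model.bin"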
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 595 | 0 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # Numerically stable softmax over the last axis.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 185 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
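# A minimal usage sketch:
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tokenizer("Hello world")  # -> input_ids with [CLS] ... [SEP] added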
| 380 | 0 |
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
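# For example, with travel days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15]
# (1-day / 7-day / 30-day), the cheapest total is 11: a 1-day pass on day 1,
# a 7-day pass covering days 4-8, and a 1-day pass on day 20.
#
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11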
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 |
"""simple docstring"""
from __future__ import annotations
# Sieve of Eratosthenes up to one million.
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
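# As a sanity check, the circular primes below 100 are
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, so
# find_circular_primes(100) should return those thirteen values.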
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 348 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
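# A sketch of a concrete subcommand (hypothetical `HelloCommand`; the parser
# passed to `register_subcommand` is the subparsers action, as in the real CLI):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")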
| 193 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
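# Example invocation (a sketch with placeholder paths and a hypothetical script
# name; the flags are the ones defined by the argument parser above):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2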
| 193 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
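# A minimal usage sketch:
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# The checkpoint's config names `Wav2Vec2FeatureExtractor`, so that class is
# resolved through the mapping above and instantiated.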
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
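# For example (a sketch), with the backend string '["torch"]':
#   create_dummy_object("UNet2DModel", '["torch"]') renders the DUMMY_CLASS
#   template, while create_dummy_object("CONSTANT", '["torch"]') renders
#   DUMMY_CONSTANT, because the name is all upper-case.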
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-order polynomial that passes through the given points."""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
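# A quick check against the statement of Project Euler 101: for the cubic
# u(n) = n**3, the first incorrect terms are 1, 15 and 58, which sum to 74,
# so solution(lambda n: n ** 3, 3) should return 74.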
if __name__ == "__main__":
print(F'''{solution() = }''')
| 277 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a sample image from the COCO test fixtures.
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 482 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = 'mobilenet_v2'

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act='relu6',
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'image-classification':
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 710 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})

        code = 'x = y'
        state = {'y': 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 5, 'y': 5})

    def test_evaluate_call(self):
        code = 'y = add_two(x)'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertDictEqual(result, {'x': 3, 'y': 5})
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_expression(self):
        code = 'x = 3\ny = 5'
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 'This is x: 3.'
        self.assertDictEqual(state, {'x': 3, 'text': 'This is x: 3.'})

    def test_evaluate_if(self):
        code = 'if x <= 3:\n y = 2\nelse:\n y = 5'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'x': 3, 'y': 2})

        state = {'x': 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 8, 'y': 5})

    def test_evaluate_list(self):
        code = 'test_list = [x, add_two(x)]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})

    def test_evaluate_name(self):
        code = 'y = x'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3, 'y': 3})

    def test_evaluate_subscript(self):
        code = 'test_list = [x, add_two(x)]\ntest_list[1]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_for(self):
        code = 'x = 0\nfor i in range(3):\n x = i'
        state = {}
        result = evaluate(code, {'range': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'x': 2, 'i': 2})
| 355 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def fetch_jobs(location: str = 'mumbai') -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 479 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union between one prediction and one ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute overall accuracy, mean accuracy and mean IoU, plus the per-category breakdowns."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }
            ),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
| 351 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the corresponding
    # counter for the first string and decrement it for the second, so
    # anagrams cancel out to zero everywhere
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 703 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps a text model's configuration and adds the extra fields the multimodal model needs."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 655 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 105 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 105 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        # a tiny vocabulary plus matching shape/pronunciation maps for the three RoCBert vocab files
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])

        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])

        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))

        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))

        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))

        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : int = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_UpperCAmelCase : List[Any] = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
_UpperCAmelCase : Tuple = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , "do_lower_case" ) else False
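# expected (character span, token) pairs: the cased tokenizer keeps 'na' / '##ï' / '##ve'
# as separate pieces, while the lower-cased variant merges them into a single 'naive' token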
_UpperCAmelCase : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
_UpperCAmelCase : List[Any] = ["的", "人", "有"]
_UpperCAmelCase : List[Any] = "".join(lowerCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : str = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
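# second pass: with tokenize_chinese_chars disabled, only the first character of the run
# keeps a bare form and the continuation pieces gain the '##' prefix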
_UpperCAmelCase : int = False
_UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Any = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Dict = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase : Optional[int] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__ )
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
    def test_sequence_builders(self):
_UpperCAmelCase : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCAmelCase : Tuple = tokenizer.encode("你好" , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = tokenizer.encode("你是谁" , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
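# in the toy vocab ids 1 and 2 are [CLS] and [SEP], so the checks below verify the special-token framing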
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
_UpperCAmelCase : int = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_UpperCAmelCase : str = "你好,你是谁"
_UpperCAmelCase : str = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
_UpperCAmelCase : int = tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = tokenizer.prepare_for_model(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Any = tokenizer.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 257 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 257 | 1 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)

    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--target_runners',
        default=None,
        type=list_str,
        required=True,
        help='Comma-separated list of runners to check status.',
    )
    parser.add_argument(
        '--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 73 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 565 | 0 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
    """Return the ``nn`` activation module matching the given name."""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
| 544 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 544 | 1 |
'''simple docstring'''
def solution():
    """Build the first million digits of Champernowne's constant and multiply d1 * d10 * ... * d1000000."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = ''.join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 394 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'DownBlock2D',
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = 'rgb'
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)['params']
    def setup(self):
        block_out_channels = self.block_out_channels
        # time embedding width follows the UNet convention: 4x the first block's channel count
        time_embed_dim = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
# input
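        # 3x3 same-padding convolution lifts the 4-channel latent sample into the width of the first block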
        self.conv_in = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # broadcast scalar flags so each down block gets its own setting
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
# down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)
for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == 'CrossAttnDownBlock2D':
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)

            # one zero-initialized 1x1 projection per resnet layer; these produce the residuals handed back to the UNet
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
# mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == 'bgr':
            # checkpoints may store the conditioning image in BGR order; flip it to the RGB order the network expects
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
# 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
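        # the conditioning image enters additively: its embedding is summed onto the first latent feature map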
sample += controlnet_cond
# 3. down
        down_block_res_samples = (sample,)
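        # the loop below collects one residual per resnet layer (plus downsamplers); these are what get returned to the UNet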
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
# 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. ControlNet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)
# 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
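        # a single scalar rescales every injected residual; conditioning_scale=0.0 effectively disables the ControlNet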
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 394 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    # greedily match the longest segmented Chinese word starting at `start`;
    # matched continuation pieces get the '##' prefix so they count as subwords
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []

    # segment the input lines in batches of 100 with LTP word segmentation (cws)
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
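# Illustrative invocation using the defaults declared above (file names are examples, not guarantees):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt --ltp ./resources/ltp \
#       --bert ./resources/robert --save_path ./resources/ref.txt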
| 667 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # slide a window of `look_back` points and use the next `forward_days` points as the target
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')

    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 469 |
from sklearn.metrics import mean_squared_error
import datasets
__A = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__A = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__A = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 469 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
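# A minimal instantiation sketch, assuming the de-obfuscated names used above:
config = SEWConfig()  # defaults correspond to a SEW-tiny-style setup
print(config.num_feat_extract_layers)  # 13 convolutional feature-extractor layers
print(config.inputs_to_logits_ratio)   # 320, the product of the conv strides (5 * 2**6)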
| 372 |
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if no element appears more than once in the collection."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
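# Trivial usage checks for the helper above (using the descriptive name given to it):
assert all_unique([1, 2, 3]) is True
assert all_unique(["a", "b", "a"]) is False
assert all_unique([]) is True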
| 372 | 1 |
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
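# Quick sanity checks of the patterns above, showing what each one captures:
assert _re_direct_key.search('    "models.bert": ["BertModel"],').groups()[0] == "models.bert"
assert _re_indirect_key.search('    _import_structure["models.bert"].extend([...])').groups()[0] == "models.bert"
assert _re_strip_line.search('        "BertModel",').groups()[0] == "BertModel"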
def get_indent(line: str) -> str:
    """Returns the indent in the line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `start_prompt` and ending before `end_prompt` if provided.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function to lower-case its result and drop underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; if `check_only=True`, just check whether it is sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
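# The ordering rule sort_objects implements, shown on a toy input:
# constants first, classes second, functions last (underscores ignored when comparing).
print(sort_objects(["load_foo", "FooTokenizer", "FOO_CONSTANT", "_helper"]))
# -> ['FOO_CONSTANT', 'FooTokenizer', '_helper', 'load_foo']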
| 461 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system a1*x + b1*y = c1 and a2*x + b2*y = c2, each given as
    [a, b, c], using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
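# A worked example; equations are given as [a, b, c] for a*x + b*y = c:
# 11x + 2y = 30 and 1x + 0y = 4, so x = 4 and y = (30 - 44) / 2 = -7.
print(cramers_rule_2x2([11, 2, 30], [1, 0, 4]))  # (4.0, -7.0)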
| 461 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class ExtractManager:
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
__lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2f\xfd"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5a\x68"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with bza.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4d\x18"]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame

        with lz4.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class Extractor:
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
                    return extractor.extract(UpperCamelCase , UpperCamelCase )
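# A hedged usage sketch of the extraction utilities above, written against the
# upstream `datasets` method names (`infer_extractor_format` / `extract`, which the
# obfuscated method definitions above correspond to); the archive path is hypothetical:
#
#     fmt = Extractor.infer_extractor_format("data.tar.gz")  # e.g. "gzip"
#     if fmt:
#         Extractor.extract("data.tar.gz", "extracted/", extractor_format=fmt)
| 39 | 1 |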
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
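# The property under test, as a standalone sketch: an optimizer wrapped by
# `accelerator.prepare` must survive a pickle round-trip (useful for checkpointing):
import pickle

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
optimizer = Accelerator().prepare(optimizer)
restored = pickle.loads(pickle.dumps(optimizer))  # must not raise
assert isinstance(restored, type(optimizer))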
| 160 | """simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks whether the values can be used by the `enigma` function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")  # note: str.replace returns a new string, so this is a no-op kept from the original

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypts or decrypts `text`; non-alphabet symbols pass through unchanged."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
_a : List[str] = 'This is my Python script that emulates the Enigma machine from WWII.'
_a : Optional[Any] = (1, 1, 1)
_a : Optional[int] = 'pictures'
_a : List[Any] = (rotora, rotora, rotora)
_a : List[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
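# Reciprocity check (assuming the reconstructed names above): applying the machine
# twice with identical settings restores the uppercased plaintext, thanks to the reflector.
msg = "HELLO WORLD"
cipher = enigma(msg, (5, 9, 14), (rotor1, rotor3, rotor2), plugb="AB")
assert enigma(cipher, (5, 9, 14), (rotor1, rotor3, rotor2), plugb="AB") == msg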
| 213 | 0 |
"""simple docstring"""
import operator
def lowercase (SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : list | None = None ) -> list:
SCREAMING_SNAKE_CASE = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE = [arr.pop(0 )]
for i, item in enumerate(SCREAMING_SNAKE_CASE_ ):
if _operator(SCREAMING_SNAKE_CASE_ , sublist[-1] ):
sublist.append(SCREAMING_SNAKE_CASE_ )
arr.pop(SCREAMING_SNAKE_CASE_ )
# merging sublist into solution list
if not solution:
solution.extend(SCREAMING_SNAKE_CASE_ )
else:
while sublist:
SCREAMING_SNAKE_CASE = sublist.pop(0 )
for i, xx in enumerate(SCREAMING_SNAKE_CASE_ ):
if not _operator(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
solution.insert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
break
else:
solution.append(SCREAMING_SNAKE_CASE_ )
strand_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
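# A worked trace of the first assert, [4, 3, 5, 1, 2] ascending:
#   strand 1: [4, 5]  -> solution [4, 5]
#   strand 2: [3]     -> merged   [3, 4, 5]
#   strand 3: [1, 2]  -> merged   [1, 2, 3, 4, 5]
print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]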
| 714 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RemBERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
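# A hedged sketch of the special-token helpers above; assumes a trained
# tokenizer.json is available locally (the path is hypothetical):
#
#     tok = RemBertTokenizerFast(tokenizer_file="tokenizer.json")
#     tok.build_inputs_with_special_tokens([5, 6, 7])
#         -> [cls_id, 5, 6, 7, sep_id]
#     tok.create_token_type_ids_from_sequences([5, 6, 7], [8, 9])
#         -> [0, 0, 0, 0, 0, 1, 1, 1]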
| 327 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # remove the hooks once `self.traced` has been populated
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by performing a forward pass with input `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a RegNet trunk the way vissl does, without the classifier head."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Returns the function that creates the matching original (timm or vissl) model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """Returns the correct Hugging Face RegNet class reference for a checkpoint name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert; it must be one of the supported regnet* architectures,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
snake_case : List[str] = parser.parse_args()
snake_case : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
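# Illustrative invocation of the conversion script above (the filename is an
# assumption; the flags come from the parser defined here):
#
#   python convert_regnet_seer_to_pytorch.py \
#       --model_name regnet-y-320-seer-in1k \
#       --pytorch_dump_folder_path ./converted_regnet
#
# Note: `--push_to_hub` is declared with `type=bool`, and argparse converts any
# non-empty string (including "False") to True, so with this parser as written
# the only way to disable the push from the command line is an empty string.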
| 335 |
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ) -> Dict:
# test for the above condition
self.test()
def lowerCamelCase__( self :Tuple ) -> int:
a__ = 0
a__ = False
while not completed:
if counter == 1:
self.reset()
a__ = self.advance()
if not self.does_advance(__snake_case ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
a__ , a__ , a__ = self.update(__snake_case )
counter += 1
if counter > 1_00_00:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def lowerCamelCase__( self :int ) -> str:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Tuple ,__snake_case :int ) -> Dict:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :int ,__snake_case :int ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :int ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str=False ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ,__snake_case :List[int] ) -> Optional[Any]:
super(__snake_case ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
a__ = token_ids
a__ = len(self.token_ids )
a__ = -1 # the index of the currently fulfilled step
a__ = False
def lowerCamelCase__( self :int ) -> str:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :Dict ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.fulfilled_idx += 1
a__ = True
if self.fulfilled_idx == (self.seqlen - 1):
a__ = True
a__ = completed
else:
# failed to make progress.
a__ = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = False
a__ = 0
def lowerCamelCase__( self :str ) -> Optional[Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int]=False ) -> Tuple:
a__ = PhrasalConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.fulfilled_idx
a__ = self.completed
return new_constraint
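# A minimal walk-through of the phrasal constraint above, using the name the
# class carries upstream in transformers (PhrasalConstraint); the token ids
# are made-up placeholders, not from a real tokenizer:
#
#   c = PhrasalConstraint([5, 9, 3])
#   c.advance()                # -> 5, the next required token
#   c.does_advance(5)          # -> True
#   c.update(5)                # -> (stepped=True, completed=False, reset=False)
#   c.update(9); c.update(3)   # completed becomes True after the full phrase
#   c.remaining()              # -> 0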
class snake_case_ :
def __init__( self :List[str] ,__snake_case :List[List[int]] ,__snake_case :Union[str, Any]=True ) -> int:
a__ = max([len(__snake_case ) for one in nested_token_ids] )
a__ = {}
for token_ids in nested_token_ids:
a__ = root
for tidx, token_id in enumerate(__snake_case ):
if token_id not in level:
a__ = {}
a__ = level[token_id]
if no_subsets and self.has_subsets(__snake_case ,__snake_case ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F' {nested_token_ids}.' )
a__ = root
def lowerCamelCase__( self :Dict ,__snake_case :Any ) -> Optional[int]:
a__ = self.trie
for current_token in current_seq:
a__ = start[current_token]
a__ = list(start.keys() )
return next_tokens
def lowerCamelCase__( self :Optional[int] ,__snake_case :int ) -> List[Any]:
a__ = self.next_tokens(__snake_case )
return len(__snake_case ) == 0
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ) -> List[str]:
a__ = list(root.values() )
if len(__snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(__snake_case ) for nn in next_nodes] )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Any ,__snake_case :Union[str, Any] ) -> Any:
a__ = self.count_leaves(__snake_case )
return len(__snake_case ) != leaf_count
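# How the trie above branches, shown with placeholder token ids (the class
# corresponds to the upstream DisjunctiveTrie):
#
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 4]])
#   trie.next_tokens([])         # -> [1]     both branches start with token 1
#   trie.next_tokens([1])        # -> [2, 4]  the disjunction point
#   trie.next_tokens([1, 2])     # -> [3]
#   trie.reached_leaf([1, 4])    # -> True    a complete branch was consumed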
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[int] ,__snake_case :List[List[int]] ) -> Optional[int]:
super(__snake_case ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(__snake_case ,__snake_case ) for token_ids in nested_token_ids ):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
a__ = DisjunctiveTrie(__snake_case )
a__ = nested_token_ids
a__ = self.trie.max_height
a__ = []
a__ = False
def lowerCamelCase__( self :Tuple ) -> Any:
a__ = self.trie.next_tokens(self.current_seq )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ) -> Dict:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.current_seq.append(__snake_case )
a__ = True
else:
a__ = True
self.reset()
a__ = self.trie.reached_leaf(self.current_seq )
a__ = completed
return stepped, completed, reset
def lowerCamelCase__( self :Any ) -> Optional[Any]:
a__ = False
a__ = []
def lowerCamelCase__( self :int ) -> Dict:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__( self :str ,__snake_case :Any=False ) -> Tuple:
a__ = DisjunctiveConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.current_seq
a__ = self.completed
return new_constraint
class snake_case_ :
def __init__( self :Tuple ,__snake_case :List[Constraint] ) -> int:
a__ = constraints
# max # of steps required to fulfill a given constraint
a__ = max([c.seqlen for c in constraints] )
a__ = len(__snake_case )
a__ = False
self.init_state()
def lowerCamelCase__( self :Dict ) -> Optional[int]:
a__ = []
a__ = None
a__ = [constraint.copy(stateful=__snake_case ) for constraint in self.constraints]
def lowerCamelCase__( self :Dict ) -> int:
a__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__( self :Dict ) -> str:
a__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
a__ = constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
else:
a__ = self.inprogress_constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[List[int]] ) -> Tuple:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
a__ , a__ = self.add(__snake_case )
                # the entire list of constraints is fulfilled
if self.completed:
break
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> List[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
a__ , a__ = False, False
if self.completed:
a__ = True
a__ = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
a__ , a__ , a__ = self.inprogress_constraint.update(__snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__snake_case ) )
a__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
a__ = None
if len(self.pending_constraints ) == 0:
# we're done!
a__ = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__snake_case ):
a__ , a__ , a__ = pending_constraint.update(__snake_case )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(__snake_case )
a__ = None
if not complete and stepped:
a__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
a__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
a__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__( self :int ,__snake_case :Optional[int]=True ) -> Dict:
        a__ = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
        # throughout this process, so the copy starts in its initialization state.
if stateful:
a__ = [
constraint.copy(stateful=__snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
a__ = self.inprogress_constraint.copy(stateful=__snake_case )
a__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
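# Sketch of driving the list state above during generation; class names follow
# the upstream transformers classes these definitions correspond to, and the
# token ids are placeholders:
#
#   state = ConstraintListState([PhrasalConstraint([5, 9]),
#                                DisjunctiveConstraint([[1, 2], [1, 3]])])
#   state.advance()                  # -> [5, 1], tokens that make progress
#   for token_id in (5, 9, 1, 3):    # a hypothetical decoded prefix
#       complete, stepped = state.add(token_id)
#   assert complete                  # every constraint has been fulfilled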
| 335 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : int ):
'''simple docstring'''
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''' )
UpperCAmelCase_ : Dict = img
UpperCAmelCase_ : Any = img.shape[1]
UpperCAmelCase_ : List[Any] = img.shape[0]
UpperCAmelCase_ : Optional[int] = dst_width
UpperCAmelCase_ : Any = dst_height
UpperCAmelCase_ : Dict = self.src_w / self.dst_w
UpperCAmelCase_ : Union[str, Any] = self.src_h / self.dst_h
UpperCAmelCase_ : Union[str, Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for i in range(self.dst_h ):
for j in range(self.dst_w ):
UpperCAmelCase_ : Dict = self.img[self.get_y(__snake_case )][self.get_x(__snake_case )]
def _lowerCamelCase ( self : Optional[int] , __snake_case : int ):
'''simple docstring'''
return int(self.ratio_x * x )
def _lowerCamelCase ( self : int , __snake_case : int ):
'''simple docstring'''
return int(self.ratio_y * y )
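# A vectorised sketch of the same nearest-neighbour mapping the class
# implements via get_x/get_y: destination pixel (i, j) samples the source at
# (int(i * src_h / dst_h), int(j * src_w / dst_w)). It reuses the module's
# `numpy as np` import; the helper name is illustrative, not part of the
# original file.
def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # row lookup table
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # column lookup table
    return img[ys[:, None], xs[None, :]]  # broadcasts to (dst_h, dst_w, channels)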
if __name__ == "__main__":
__UpperCamelCase , __UpperCamelCase : Tuple = 800, 600
__UpperCamelCase : Optional[Any] = imread('image_data/lena.jpg', 1)
__UpperCamelCase : str = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
    destroyAllWindows()
| 641 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
        super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 641 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 1000000 ):
"""simple docstring"""
lowerCAmelCase__ : int = set(range(3 , snake_case__ , 2 ) )
primes.add(2 )
for p in range(3 , snake_case__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , snake_case__ , snake_case__ ) ) )
lowerCAmelCase__ : Any = [float(snake_case__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(snake_case__ , limit + 1 , snake_case__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
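# The function above is a totient sieve: phi[n] starts at n and is multiplied
# by (1 - 1/p) for every prime p dividing n, so sum(phi[2:]) counts the
# reduced proper fractions with denominator <= limit (Project Euler 72).
# Hand-checked example: phi(6) = 6 * (1 - 1/2) * (1 - 1/3) = 2.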
| 565 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = tempfile.mkdtemp()
_A = BlipImageProcessor()
_A = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A = InstructBlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).qformer_tokenizer
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
_A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = self.prepare_image_inputs()
_A = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_A = qformer_tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
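# As the assertions above show, this processor bundles three components (an
# image processor, a main tokenizer, and a separate Q-Former tokenizer) and
# merges their outputs into one dict: the usual tokenizer keys, the same keys
# with a `qformer_` prefix, and `pixel_values` from the image processor.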
| 401 | 0 |
def __UpperCamelCase ( _A , _A , _A ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
lowerCAmelCase_ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCAmelCase_ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
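# Worked example for the amortised-payment formula above (values are
# illustrative): principal = 25_000 and rate_per_annum = 0.12 give
# rate_per_month = 0.01, and years_to_repay = 2 gives 24 payments, so the
# monthly payment is
#   25000 * 0.01 * 1.01**24 / (1.01**24 - 1) ~= 1176.84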
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class A ( __UpperCAmelCase ):
__snake_case = 'data2vec-vision'
def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=True, UpperCamelCase__=[3, 5, 7, 11], UpperCamelCase__=[1, 2, 3, 6], UpperCamelCase__=True, UpperCamelCase__=0.4, UpperCamelCase__=256, UpperCamelCase__=1, UpperCamelCase__=False, UpperCamelCase__=255, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = use_mask_token
lowerCAmelCase_ = use_absolute_position_embeddings
lowerCAmelCase_ = use_relative_position_bias
lowerCAmelCase_ = use_shared_relative_position_bias
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase_ = out_indices
lowerCAmelCase_ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase_ = use_auxiliary_head
lowerCAmelCase_ = auxiliary_loss_weight
lowerCAmelCase_ = auxiliary_channels
lowerCAmelCase_ = auxiliary_num_convs
lowerCAmelCase_ = auxiliary_concat_input
lowerCAmelCase_ = semantic_loss_ignore_index
class A ( __UpperCAmelCase ):
__snake_case = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 1E-4
| 325 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : str ):
"""simple docstring"""
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''width_multiplier''' ) )
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Optional[Any] , a__ : str , a__ : Tuple=13 , a__ : Dict=64 , a__ : int=2 , a__ : str=3 , a__ : List[Any]="swish" , a__ : Union[str, Any]=3 , a__ : Optional[int]=32 , a__ : List[str]=0.1 , a__ : Optional[Any]=0.0_2 , a__ : Optional[Any]=True , a__ : Any=True , a__ : List[str]=10 , a__ : Tuple=None , a__ : Optional[int]=0.2_5 , a__ : List[str]=0.0 , a__ : Optional[Any]=0.0 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = make_divisible(512 * width_multiplier , divisor=8 )
__snake_case = hidden_act
__snake_case = conv_kernel_size
__snake_case = output_stride
__snake_case = classifier_dropout_prob
__snake_case = use_labels
__snake_case = is_training
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = scope
__snake_case = width_multiplier
__snake_case = ffn_dropout
__snake_case = attn_dropout
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def a (self : int ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def a (self : Optional[Any] , a__ : Optional[int] , a__ : List[Any] , a__ : List[Any] , a__ : Any ):
"""simple docstring"""
__snake_case = MobileViTVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a (self : Tuple , a__ : Any , a__ : Any , a__ : List[str] , a__ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MobileViTVaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : Any , a__ : str ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MobileViTVaForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def a (self : str ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : str = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : str = False
A_ : Any = False
A_ : List[str] = False
A_ : List[str] = False
def a (self : str ):
"""simple docstring"""
__snake_case = MobileViTVaModelTester(self )
__snake_case = MobileViTVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def a (self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : str ):
"""simple docstring"""
pass
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def a (self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] ):
__snake_case = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__snake_case = outputs.hidden_states
__snake_case = 5
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case = 2
for i in range(len(_lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def a (self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = MobileViTVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCamelCase__ ( ) -> Optional[int]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : List[Any] ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def a (self : Tuple ):
"""simple docstring"""
__snake_case = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_lowerCAmelCase )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def a (self : Tuple ):
"""simple docstring"""
__snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = model.to(_lowerCAmelCase )
__snake_case = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
__snake_case = outputs.logits
# verify the logits
__snake_case = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCAmelCase )
__snake_case = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def a (self : int ):
"""simple docstring"""
__snake_case = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = model.to(_lowerCAmelCase )
__snake_case = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case = model(**_lowerCAmelCase )
__snake_case = outputs.logits.detach().cpu()
__snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(50, 60)] )
__snake_case = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
__snake_case = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
__snake_case = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
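# These tests follow the standard transformers layout (a ModelTesterMixin
# suite plus @slow integration tests); a typical selective run, with an
# illustrative test-file path, would be:
#
#   RUN_SLOW=1 pytest tests/models/mobilevitv2/test_modeling_mobilevitv2.py -k integration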
| 592 |
'''simple docstring'''
def A__ ( numa : int , numb : int ):
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
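# Sign check via XOR: the sign bit of numa ^ numb is set exactly when the two
# operands' sign bits differ, and Python's arbitrary-precision ints preserve
# this under the `< 0` comparison. For example A__(3, -5) -> True and
# A__(-2, -7) -> False.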
| 50 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( ) -> list[list[int]]:
return [list(range(1_000 - i , -1_000 - i , -1)) for i in range(1_000)]
lowerCAmelCase__ = generate_large_matrix()
lowerCAmelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __UpperCAmelCase ( lowerCamelCase_) -> None:
assert all(row == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for row in grid)
assert all(list(lowerCamelCase_) == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for col in zip(*lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : List[str] = len(lowerCamelCase_) - 1
    # Edge cases: an empty array, or an array whose values are all negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase__ : int = (left + right) // 2
UpperCamelCase__ : int = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase__ : Union[str, Any] = mid + 1
else:
UpperCamelCase__ : int = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCamelCase_)
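# find_negative_index operates on a row sorted in decreasing order and returns
# the index of the first negative value, i.e. the count of non-negatives:
#
#   find_negative_index([4, 3, 2, -1])   # -> 3
#   find_negative_index([-1, -2])        # -> 0   (everything is negative)
#   find_negative_index([3, 2, 1])       # -> 3   (no negatives at all)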
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : Tuple = len(grid[0])
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : Dict = find_negative_index(grid[i][:bound])
total += bound
return (len(lowerCamelCase_) * len(grid[0])) - total
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return len([number for row in grid for number in row if number < 0])
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[str] = 0
for row in grid:
for i, number in enumerate(lowerCamelCase_):
if number < 0:
total += len(lowerCamelCase_) - i
break
return total
def __UpperCAmelCase ( ) -> None:
from timeit import timeit
print('Running benchmarks')
UpperCamelCase__ : Optional[int] = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase__ : Any = timeit(f'{func}(grid=grid)' , setup=lowerCamelCase_ , number=500)
print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
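# Illustrative invocation (the script filename and all paths are placeholders;
# the flags come from the parser above):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf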
| 6 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int , lowerCamelCase_: Union[str, Any] , lowerCamelCase_: Optional[int] , lowerCamelCase_: int , lowerCamelCase_: Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
snake_case : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
snake_case : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
snake_case : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case : int = value
elif weight_type == "weight_g":
snake_case : int = value
elif weight_type == "weight_v":
snake_case : List[str] = value
elif weight_type == "bias":
snake_case : Optional[int] = value
else:
snake_case : List[str] = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Tuple , lowerCamelCase_: Dict ):
"""simple docstring"""
snake_case : Optional[Any] = []
snake_case : Tuple = fairseq_model.state_dict()
snake_case : Union[str, Any] = hf_model.feature_extractor
snake_case : Any = hf_model.adapter
for name, value in fairseq_dict.items():
snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == "group" , )
snake_case : List[str] = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
snake_case : List[str] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
snake_case : Any = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(lowerCamelCase_ )[0].split("." )[-2]
snake_case : int = mapped_key.replace("*" , lowerCamelCase_ )
if "weight_g" in name:
snake_case : Union[str, Any] = "weight_g"
elif "weight_v" in name:
snake_case : Tuple = "weight_v"
elif "bias" in name:
snake_case : Tuple = "bias"
elif "weight" in name:
snake_case : List[str] = "weight"
else:
snake_case : str = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: List[str] , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Tuple , lowerCamelCase_: Union[str, Any] , lowerCamelCase_: Optional[int] ):
"""simple docstring"""
snake_case : List[str] = full_name.split("conv_layers." )[-1]
snake_case : int = name.split("." )
snake_case : int = int(items[0] )
snake_case : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one fairseq adapter / projection weight into the HF adapter module."""
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                logger.info(f"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build an output projection that shares its weights with an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
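# Minimal usage sketch for the helper above (illustrative only; `decoder` is a
# hypothetical MBartForCausalLM instance, and the conversion below does not call
# this helper itself):
#
#   lm_head = make_linear_from_emb(decoder.model.decoder.embed_tokens)
#   # lm_head.weight now shares storage with the embedding matrix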
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak a fairseq wav2vec2 + mBART-50 seq2seq checkpoint into the transformers design."""
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
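# Example invocation (the script name and all paths are placeholders, not real files):
#
#   python convert_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.mbart50.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50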
| 449 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format) or from a plain directory."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
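# For reference, the pytest "warnings summary" blocks parsed above look roughly
# like the following (content invented for illustration): a non-indented header
# line delimits each block, and the indented lines get buffered and joined.
#
#   tests/test_modeling_foo.py::FooTest::test_bar
#     /usr/lib/python3.8/warnings.py:10: DeprecationWarning: foo is deprecated
#       warnings.warn("foo is deprecated", DeprecationWarning)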
if __name__ == "__main__":
    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
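# Example invocation (run id and token are placeholders):
#
#   python extract_warnings.py --workflow_run_id 1234567890 --output_dir ./artifacts \
#       --token $GITHUB_TOKEN --targets DeprecationWarning,UserWarning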
| 449 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)
                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)
                self.assertEqual(text_a.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_pretokenized_inputs(self):
        pass
    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_maximum_encoding_length_pair_input(self):
        pass
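# Quick illustration of the character-level behaviour exercised above (a sketch,
# assuming the vocab file written in setUp):
#
#   tokenizer = MgpstrTokenizer(vocab_file)
#   tokenizer.tokenize("tester")   # -> ["t", "e", "s", "t", "e", "r"]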
| 495 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing with a temperature-controlled chance of accepting worse neighbors."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
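# Worked example of the Metropolis-style acceptance rule above: with
# current_temp = 100, a worsening move of change = -5 is still accepted with
# probability e**(-5 / 100) ~= 0.951, while at current_temp = 1 the same move
# survives only with probability e**(-5) ~= 0.0067; the search hardens as it cools.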
if __name__ == "__main__":
    def test_fa(x, y):
        """Quadratic bowl f(x, y) = x^2 + y^2, minimized at the origin."""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
    def test_fa(x, y):
        """f(x, y) = 3x^2 - 6y; redefines test_fa for the second pair of runs."""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'''{local_min.score()}'''
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'''{local_min.score()}'''
)
| 495 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Configuration holder used to parameterize the image-processor tests below."""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Dict =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ):
UpperCamelCase_: int = LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'apply_ocr' ) )
def _a ( self ):
UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
UpperCamelCase_: Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def _a ( self ):
pass
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_: Tuple = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _lowerCamelCase )
self.assertIsInstance(encoding.boxes , _lowerCamelCase )
# Test batched
UpperCamelCase_: List[str] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase_: int = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase_: int = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ):
# with apply_OCR = True
UpperCamelCase_: str = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase_: List[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
UpperCamelCase_: Dict = Image.open(ds[0]['file'] ).convert('RGB' )
UpperCamelCase_: Any = image_processing(_lowerCamelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_: int = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
UpperCamelCase_: Optional[Any] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 
5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowerCamelCase )
self.assertListEqual(encoding.boxes , _lowerCamelCase )
# with apply_OCR = False
UpperCamelCase_: Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = image_processing(_lowerCamelCase , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 57 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the references below do not fail when vision is unavailable."""
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
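# Minimal standalone use of the pipeline exercised above (a sketch; the image is
# the test fixture referenced throughout this file):
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=1)
#   # -> [{"score": ..., "answer": "2"}]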
| 684 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeechSat model."""
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
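# Worked example for the property above: with the default
# conv_stride = (5, 2, 2, 2, 2, 2, 2), the ratio is 5 * 2**6 = 320, i.e. one
# encoder frame per 320 raw audio samples (20 ms at a 16 kHz sampling rate).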
| 127 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
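# With the _LazyModule registration above, `from transformers.models.mluke import
# MLukeTokenizer` resolves lazily, so the sentencepiece-backed module is only
# imported on first attribute access.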
| 127 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
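# e.g. with config.pad_token_id = 1, input_ids = [[2, 5, 1, 1]] yields
# attention_mask = [[1, 1, 0, 0]].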
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_: List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_: Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_: str = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=A__ , **self.config_updates , )
UpperCAmelCase_: Optional[Any] = prepare_opt_inputs_dict(A__ , A__ )
return config, inputs_dict
def snake_case_ ( self , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Tuple = TFOPTModel(config=A__ )
UpperCAmelCase_: Optional[Any] = inputs_dict["input_ids"]
UpperCAmelCase_: str = input_ids[:1, :]
UpperCAmelCase_: Optional[Any] = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_: Dict = 1
# first forward pass
UpperCAmelCase_: Optional[int] = model(A__ , attention_mask=A__ , use_cache=A__ )
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_: Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_: str = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_: Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_: Dict = model(A__ , attention_mask=A__ )[0]
UpperCAmelCase_: Union[str, Any] = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_: Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_: str = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_: str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1E-3 )
@require_tf
class UpperCAmelCase__ ( snake_case__ , snake_case__ , unittest.TestCase ):
snake_case_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case_ = (TFOPTForCausalLM,) if is_tf_available() else ()
snake_case_ = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 10
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = TFOPTModelTester(self )
UpperCAmelCase_: Dict = ConfigTester(self , config_class=A__ )
def snake_case_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(A__ , A__ ):
if hasattr(A__ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(A__ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
UpperCAmelCase_: Optional[Any] = model_class(config=A__ )
UpperCAmelCase_: str = _get_word_embedding_weight(A__ , model.get_input_embeddings() )
UpperCAmelCase_: int = _get_word_embedding_weight(A__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(A__ )
UpperCAmelCase_: int = _get_word_embedding_weight(A__ , model.get_input_embeddings() )
UpperCAmelCase_: Dict = _get_word_embedding_weight(A__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
UpperCAmelCase_: Dict = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , A__ )
# check that weights remain the same after resizing
UpperCAmelCase_: List[Any] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCAmelCase_: List[Any] = False
self.assertTrue(A__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , A__ )
UpperCAmelCase_: str = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCAmelCase_: Optional[int] = False
self.assertTrue(A__ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
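# e.g. _long_tensor([[0, 31414, 2]]) returns a tf.Tensor of shape (1, 3) with
# dtype tf.int32.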
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
snake_case_ = 99
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
UpperCAmelCase_: Dict = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
UpperCAmelCase_: Tuple = input_ids.shape[0]
UpperCAmelCase_: Dict = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = TFOPTModel.from_pretrained("facebook/opt-350m" )
UpperCAmelCase_: Tuple = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCAmelCase_: Dict = tf.not_equal(A__ , model.config.pad_token_id )
with tf.GradientTape():
UpperCAmelCase_: str = model(input_ids=A__ , attention_mask=A__ ).last_hidden_state
UpperCAmelCase_: Union[str, Any] = (1, 11, 512)
self.assertEqual(output.shape , A__ )
UpperCAmelCase_: List[str] = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , A__ , atol=4E-3 ) )
UpperCAmelCase_: Tuple = tf.function(A__ , jit_compile=A__ )
UpperCAmelCase_: Tuple = xla_generate(A__ , A__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , A__ , atol=4E-2 ) )
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
UpperCAmelCase_: Any = "facebook/opt-350m"
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = TFOPTForCausalLM.from_pretrained(self.path_model )
UpperCAmelCase_: List[Any] = GPTaTokenizer.from_pretrained(self.path_model )
UpperCAmelCase_: Dict = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
UpperCAmelCase_: Optional[Any] = tokenizer(A__ , return_tensors="tf" , padding=A__ , add_special_tokens=A__ )
UpperCAmelCase_: Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
UpperCAmelCase_: Optional[int] = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(A__ , A__ , atol=1E-4 ) )
UpperCAmelCase_: Optional[int] = tf.function(A__ , jit_compile=A__ )
UpperCAmelCase_: str = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(A__ , A__ , atol=1E-4 ) )
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def snake_case_ ( self ):
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = "facebook/opt-125m"
UpperCAmelCase_: int = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCAmelCase_: Dict = []
UpperCAmelCase_: Optional[Any] = GPTaTokenizer.from_pretrained(A__ )
UpperCAmelCase_: Any = TFOPTForCausalLM.from_pretrained(A__ )
for prompt in self.prompts:
UpperCAmelCase_: Optional[Any] = tokenizer(A__ , return_tensors="tf" ).input_ids
UpperCAmelCase_: List[Any] = model.generate(A__ , max_length=10 )
UpperCAmelCase_: Optional[Any] = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
predicted_outputs += generated_string
self.assertListEqual(A__ , A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = "facebook/opt-350m"
UpperCAmelCase_: Tuple = GPTaTokenizer.from_pretrained(A__ )
UpperCAmelCase_: Optional[Any] = TFOPTForCausalLM.from_pretrained(A__ )
UpperCAmelCase_: Optional[int] = "left"
# use different length sentences to test batching
UpperCAmelCase_: Union[str, Any] = [
"Hello, my dog is a little",
"Today, I",
]
UpperCAmelCase_: Union[str, Any] = tokenizer(A__ , return_tensors="tf" , padding=A__ )
UpperCAmelCase_: Union[str, Any] = inputs["input_ids"]
UpperCAmelCase_: List[Any] = model.generate(input_ids=A__ , attention_mask=inputs["attention_mask"] )
UpperCAmelCase_: int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
UpperCAmelCase_: Any = model.generate(input_ids=A__ )
UpperCAmelCase_: Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
UpperCAmelCase_: Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
UpperCAmelCase_: Optional[int] = model.generate(input_ids=A__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_: str = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
UpperCAmelCase_: Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A__ )
UpperCAmelCase_: str = tokenizer.decode(output_padded[0] , skip_special_tokens=A__ )
UpperCAmelCase_: Any = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , [non_padded_sentence, padded_sentence] )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = "facebook/opt-350m"
UpperCAmelCase_: Tuple = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCAmelCase_: Optional[Any] = []
UpperCAmelCase_: Union[str, Any] = GPTaTokenizer.from_pretrained(A__ )
UpperCAmelCase_: Dict = TFOPTForCausalLM.from_pretrained(A__ )
for prompt in self.prompts:
UpperCAmelCase_: List[Any] = tokenizer(A__ , return_tensors="tf" ).input_ids
UpperCAmelCase_: Union[str, Any] = model.generate(A__ , max_length=10 )
UpperCAmelCase_: Optional[Any] = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
predicted_outputs += generated_string
        self.assertListEqual(A__ , A__ )
| 137 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: int = TFDebertaVaModel(config=A__ )
UpperCAmelCase_: Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_: Dict = [input_ids, input_mask]
UpperCAmelCase_: Union[str, Any] = model(A__ )
UpperCAmelCase_: Optional[int] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = TFDebertaVaForMaskedLM(config=A__ )
UpperCAmelCase_: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: List[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Optional[Any] = TFDebertaVaForSequenceClassification(config=A__ )
UpperCAmelCase_: Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Any = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.num_labels
UpperCAmelCase_: Any = TFDebertaVaForTokenClassification(config=A__ )
UpperCAmelCase_: int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Optional[int] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = TFDebertaVaForQuestionAnswering(config=A__ )
UpperCAmelCase_: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase_: Any = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
): Optional[Any] = config_and_inputs
UpperCAmelCase_: List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4) | 137 | 1 |
"""Floyd-Warshall all-pairs shortest paths on a weighted directed graph."""
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
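    # Expected output for this graph: 11 (path 1 -> 3 -> 4) and 16
    # (path 0 -> 2 -> 3). Floyd-Warshall runs in O(n^3) time and O(n^2)
    # space, which is fine for the 5-node demo above.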
| 709 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[str] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
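        # The upstream TimeSformer model distinguishes several attention
        # variants here; "divided_space_time" (the default) factorizes temporal
        # and spatial attention into separate sub-layers.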
| 398 | 0 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
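# A quick sanity check of the mass-action law n * p = n_i**2 underlying the
# branches above (the numbers are illustrative, not from a real device):
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ("intrinsic_conc", 50.0), since (25 * 100) ** 0.5 == 50.0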
if __name__ == "__main__":
import doctest
doctest.testmod() | 108 |
'''simple docstring'''
def excel_column_to_number(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
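# Worked example: "AB" -> ('B' - 'A' + 1) * 26**0 + ('A' - 'A' + 1) * 26**1
#                      = 2 + 26 = 28, and "ZZ" -> 26 + 26 * 26 = 702.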
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
"""A least-recently-used (LRU) cache backed by a doubly linked list."""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        # Sentinel head/rear nodes avoid None checks when adding and removing.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert `node` just before the rear sentinel (most recently used)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink `node` from the list; return it, or None if it was detached."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    # Maps each decorated function to its own LRUCache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
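# Minimal usage sketch of the class-level decorator (illustrative, not part of
# the module's own tests): memoize a recursive Fibonacci.
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
#
#   fib(100)          # fast, thanks to the per-function cache
#   fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, ...)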
if __name__ == "__main__":
import doctest
doctest.testmod()
| 513 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
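# The dictionary above only records submodule -> exported-symbol names; the
# heavy torch/TF imports are deferred until an attribute is first accessed,
# via the `_LazyModule` proxy installed at the bottom of this file.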
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 513 | 1 |
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
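# Worked trace for "(5 + 7)": '5' and '7' land on the operand stack (RULE 1),
# '+' on the operator stack (RULE 2); the ')' pops both operands plus the
# operator and pushes 12 back (RULE 4). RULE 3, "ignore '('", is implicit:
# '(' matches none of the branches above.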
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
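        # e.g. with the defaults image_size=10, patch_size=2, num_frames=2:
        # 25 patches per frame, so seq_length = 2 * 25 + 1 = 51 tokens
        # including the CLS token.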
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 61 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
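    # Note: the transformer and VAE are put in eval mode so that any dropout
    # is disabled and the fixed-seed output slices checked below stay
    # deterministic.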
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="cpu"
lowerCamelCase__: Union[str, Any] =self.get_dummy_components()
lowerCamelCase__: List[str] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: List[str] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: Any =pipe(**UpperCAmelCase_).images
lowerCamelCase__: List[str] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3))
lowerCamelCase__: Optional[Any] =np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
lowerCamelCase__: List[Any] =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase_ , expected_max_diff=1E-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =torch.manual_seed(0)
lowerCamelCase__: Tuple =DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.to("cuda")
lowerCamelCase__: Any =["vase", "umbrella", "white shark", "white wolf"]
lowerCamelCase__: Any =pipe.get_label_ids(UpperCAmelCase_)
lowerCamelCase__: Any =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=40 , output_type="np").images
for word, image in zip(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Optional[int] =load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""")
assert np.abs((expected_image - image).max()) < 1E-2
def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
lowerCamelCase__: List[str] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
lowerCamelCase__: List[str] =["vase", "umbrella"]
lowerCamelCase__: List[Any] =pipe.get_label_ids(UpperCAmelCase_)
lowerCamelCase__: str =torch.manual_seed(0)
lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="np").images
for word, image in zip(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: str =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""")
assert np.abs((expected_image - image).max()) < 1E-1
| 703 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace equation: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
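# Rough check with water at room temperature (bulk modulus ~2.15e9 Pa,
# density ~998 kg/m^3): (2.15e9 / 998) ** 0.5 ~= 1468 m/s, close to the
# commonly quoted ~1480 m/s.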
if __name__ == "__main__":
import doctest
doctest.testmod()
| 437 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps language-modeling token sequences and cleans them up for distillation."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences for which at least half of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
return tk_t, lg_t | 162 |
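# `batch_sequences` is intended as the `collate_fn` of the DataLoader built on
# this dataset in the distillation trainer, e.g. (sketch):
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)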
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b via the Russian-peasant double-and-add scheme."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res
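# Trace of binary_multiply(5, 6): b = 0b110, so the low bit is 0 (skip),
# then 1 (res += 10 -> 10), then 1 (res += 20 -> 30); 5 * 6 == 30.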
def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Same double-and-add scheme, reducing modulo c at each addition."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res | 162 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase : Dict = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]

    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
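    # Six of the twelve teacher layers ([0, 2, 4, 7, 9, 11]) are copied into
    # consecutive student layers -- the standard DistilBERT initialization.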
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 353 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase):
def UpperCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
SCREAMING_SNAKE_CASE_ : Dict = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Any = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
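        # The Mel image resolution is tied to the UNet sample size: x_res is the
        # number of spectrogram frames (image width) and y_res the number of mel
        # bins (image height), so generated images can be decoded back to audio.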
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : str = AudioDiffusionPipeline(vqvae=_SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 )
SCREAMING_SNAKE_CASE_ : Any = output.audios[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 , return_dict=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
SCREAMING_SNAKE_CASE_ : List[Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
SCREAMING_SNAKE_CASE_ : List[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
SCREAMING_SNAKE_CASE_ : Any = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
SCREAMING_SNAKE_CASE_ : Optional[int] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
SCREAMING_SNAKE_CASE_ : str = DDIMScheduler()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_vqvae_and_unet
SCREAMING_SNAKE_CASE_ : int = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
np.random.seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(raw_audio=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
SCREAMING_SNAKE_CASE_ : int = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
SCREAMING_SNAKE_CASE_ : Optional[int] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
SCREAMING_SNAKE_CASE_ : int = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_unet_condition
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_SCREAMING_SNAKE_CASE , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
np.random.seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.rand((1, 1, 10) )
SCREAMING_SNAKE_CASE_ : Any = pipe(generator=_SCREAMING_SNAKE_CASE , encoding=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images[0]
SCREAMING_SNAKE_CASE_ : Dict = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _A ( unittest.TestCase):
def UpperCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch_device
SCREAMING_SNAKE_CASE_ : str = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
SCREAMING_SNAKE_CASE_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(generator=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = output.audios[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
SCREAMING_SNAKE_CASE_ : Optional[int] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
SCREAMING_SNAKE_CASE_ : Any = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 353 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel as UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case : Any =logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
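# The `undo_step` branch above implements RePaint's resampling trick: every
# `jump_length` denoising steps the sampler jumps *forward* in noise level
# (`jump_n_sample` times), letting the known and inpainted regions
# re-harmonize before denoising resumes.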
| 647 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.")
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.")
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.")
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.")
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.")
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.")
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.")
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.")
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
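# Added illustration: a typical invocation of this subcommand once wired into the `accelerate`
# CLI. The TPU name and zone are made-up placeholders; `--debug` only prints the gcloud command.
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello from the pod" --install_accelerate --debug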
| 288 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( num ):
    """Return the largest number obtainable by deleting exactly one digit of ``num``."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(num))
    # build one copy of the digit list per position, then drop that position's digit
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
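# Added worked example: dropping one digit of 152 yields 52, 12 or 15, so the maximum is 52.
assert _lowerCAmelCase(152) == 52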
| 248 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"""{solution() = }""")
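# Added sanity check: the primes below 10 are 2, 3, 5 and 7, so solution(10) == 17. For the
# default limit of 2,000,000 (Project Euler 10) the known answer is 142913828922, though the
# trial-division test above is slow at that scale; a sieve is the usual speed-up.
assert solution(10) == 17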
| 248 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Optional[int] = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1_000, id2label=id2label, label2id=label2id)
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist())
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
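# Added illustration of running this conversion script (the filename is an assumption):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit_dump --push_to_hub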
| 336 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
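# Added usage sketch (not in the original file): the class above is normally reached through the
# `pipeline(...)` factory; the CLIP checkpoint and image path are assumptions.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
# `predictions` is a list of {"score", "label"} dicts sorted by descending score.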
| 490 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A = get_tests_dir('fixtures')
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True, use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'})
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""", trust_remote_code=True)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
| 700 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 234 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """Arguments for building a Flax vision-encoder-decoder model from pretrained checkpoints."""

    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'})
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization. '
                "Don't set if you want to train an encoder model from scratch."
            )
        })
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization. '
                "Don't set if you want to train a decoder model from scratch."
            )
        })
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'})
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'})
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config)
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
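# Added illustration (script name and checkpoints are assumptions; this mirrors the typical
# ViT + GPT-2 image-captioning setup that the argument names suggest):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2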
| 186 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences with nltk and rejoin them separated by newlines."""
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
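# Added illustration of the helper above:
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   returns "First sentence.\nSecond one."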
| 186 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.")
| 465 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
_A : Any = ['''torch''', '''torchsde''']
def __init__( self : Any ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Tuple ):
requires_backends(self ,["torch", "torchsde"] )
@classmethod
def __UpperCamelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
requires_backends(cls ,["torch", "torchsde"] )
@classmethod
def __UpperCamelCase ( cls : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
requires_backends(cls ,["torch", "torchsde"] )
| 465 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
def __init__( self: Union[str, Any] ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
            f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
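# Added usage sketch (checkpoint id is an assumption): the dispatch above means one call works
# for any architecture listed in FEATURE_EXTRACTOR_MAPPING_NAMES.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# e.g. inputs = feature_extractor(raw_audio, sampling_rate=16_000, return_tensors="pt")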
| 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (PipelineTesterMixin , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
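# Added inference sketch for the pipeline under test (model id as in the slow test above;
# `input_video` is an assumed (batch, frames, channels, height, width) tensor):
#
#   import torch
#   from diffusers import VideoToVideoSDPipeline
#
#   pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   frames = pipe("Spiderman is surfing", video=input_video, num_inference_steps=25).frames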
| 1 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 142 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : str = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : List[str] = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
a_ : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
@classmethod
def A__ (cls , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''])
| 142 | 1 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image in place around its mean pixel value."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
image = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('''output_image_path''')
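# Added essentially equivalent numpy sketch (illustrative; uses a float mean rather than the
# integer mean of the loop version):
#
#   import numpy as np
#   arr = np.array(Image.open("path_to_image").convert("L"))
#   Image.fromarray(np.where(arr > arr.mean(), 255, 0).astype(np.uint8)).save("output_image_path")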
| 572 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( DiffusionPipeline ):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 655 | 0 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[Any] = []
_a : Tuple = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : int = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : int = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Union[str, Any] = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : str = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 319 |
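# A quick sanity check of the monotonic-stack variant restored above (assumes
# next_greatest_element is importable from that module). Each element is pushed
# and popped at most once, so the pass is O(n).
sample = [2, 7, 3, 5, 4, 6, 8]
assert next_greatest_element(sample) == [7, 8, 5, 6, 6, 8, -1]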
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
'''simple docstring'''
_a : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a ,'embed_dim' ) )
self.parent.assertTrue(hasattr(_a ,'num_heads' ) )
class CvtModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        return
@unittest.skip(reason='Cvt does not output attentions' )
    def test_attention_outputs(self):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
    def test_model_common_attributes(self):
'''simple docstring'''
pass
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 319 | 1 |
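# The shape assertions in the CvT tester above rely on the standard convolution
# output-size formula; a standalone check with assumed CvT-13-style stage
# parameters (kernel, stride, padding per stage).
from math import floor

def conv_out_size(size: int, padding: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64
for kernel, stride, padding in [(7, 4, 2), (3, 2, 1), (3, 2, 1)]:
    size = conv_out_size(size, padding, kernel, stride)
print(size)  # 64 -> 16 -> 8 -> 4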
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """simple docstring"""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 312 |
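# A runnable single-process sketch of the gloo gather/scatter pattern the
# retriever above depends on; the address and port are hypothetical.
import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

payload = torch.arange(4, dtype=torch.float32)
gathered = [torch.empty(4) for _ in range(dist.get_world_size())]
dist.gather(payload, dst=0, gather_list=gathered)    # rank 0 collects every worker's tensor
target = torch.empty(4)
dist.scatter(target, src=0, scatter_list=[payload])  # rank 0 hands each worker its slice
assert torch.equal(target, payload)
dist.destroy_process_group()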
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)

| 561 | 0 |
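# A toy stand-in (not the transformers implementation) showing the
# deferred-import idea behind _LazyModule used above: attribute access
# triggers the submodule import, so importing the package stays cheap.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)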
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        """simple docstring"""
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, '''initial''')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, '''checkpoint''')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_0'''))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_1'''))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        """simple docstring"""
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('''Item at index 0''' in message)
        self.assertTrue('''Item at index 1''' in message)
        self.assertFalse('''Item at index 2''' in message)
        self.assertFalse('''Item at index 3''' in message)

    def test_with_scheduler(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_0'''))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_0''')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_9''')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_10''')))

    @require_cuda
    def test_map_location(self):
        """simple docstring"""
        cmd = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()

| 718 |
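# The round trip the tests above exercise reduces to this usage pattern;
# `model` and `optimizer` are assumed to exist already and the path is hypothetical.
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="/tmp/demo_ckpt", project_config=config)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # -> /tmp/demo_ckpt/checkpoints/checkpoint_0 (model, optimizer, RNG)
accelerator.load_state("/tmp/demo_ckpt/checkpoints/checkpoint_0")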
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

| 628 | 0 |
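# A minimal greedy BPE applier reproducing how the toy merge table above
# tokenizes words; an illustrative sketch, not the CTRL tokenizer's actual
# implementation.
def apply_merges(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        pairs = [(ranks.get((a, b), float("inf")), i) for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
print(apply_merges("adapt", merges))  # ['adapt</w>']
print(apply_merges("react", merges))  # ['re', 'a', 'c', 't</w>'] -> "re@@ a@@ c@@ t"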
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 101 |
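# A worked check of PV = nRT with R ~= 0.0821 L*atm/(mol*K), matching the
# constant used above: 1 mol of an ideal gas at 273 K in 22.4 L is about 1 atm.
pressure = (1 * 0.0821 * 273) / 22.4
print(round(pressure, 2))  # ~1.0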
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
| 572 | 0 |
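# A small worked example for the helper restored above: R(6) = 111111 = 7 * 15873,
# and no shorter repunit is divisible by 7, so A(7) = 6.
assert least_divisible_repunit(7) == 6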
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = '''beit'''

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ) -> str:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
    def atol_for_validation(self) -> float:
return 1e-4
| 676 |
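# Hypothetical usage of the restored config class above; assumes a transformers
# installation that ships BeitConfig.
from transformers import BeitConfig

config = BeitConfig(image_size=384, drop_path_rate=0.2)
print(config.model_type, config.image_size)  # beit 384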
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size="auto") -> Union[str, Any]:
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self) -> Any:
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt=None, num_images_per_prompt: int = 1, eta: float = 0.0, generator=None, latents=None, output_type: str = "pil", return_dict: bool = True, callback=None, callback_steps: int = 1, **kwargs, ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676 | 1 |
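# The `accepts_eta` introspection inside the pipeline above is a reusable
# pattern; a sketch that keeps `eta` only when the scheduler's step() accepts
# it (DDIM does, PNDM does not). The helper name is an assumption.
import inspect

def filter_step_kwargs(scheduler_step, eta=0.0):
    extra_kwargs = {}
    if "eta" in set(inspect.signature(scheduler_step).parameters.keys()):
        extra_kwargs["eta"] = eta
    return extra_kwargs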
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('''The nodes number should be same as the number of coins''')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_distrib_moves, coins_distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
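# A worked example for distribute_coins above: with coins [3, 0, 0] the root
# passes one coin to each empty leaf, so two moves are needed.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2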
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 316 | 1 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str):
        """Encrypt ``text`` into a cipher list plus the random key that produced it."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]):
        """Invert encrypt: since c = (p + k) * k, p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 701 |
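# The algebra behind decrypt above: c = (p + k) * k = p*k + k**2, so
# p = (c - k**2) / k. A quick numeric check:
p, k = ord("H"), 37
c = (p + k) * k
assert (c - k**2) // k == p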
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def A_ ( ):
SCREAMING_SNAKE_CASE:Optional[int] = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=snake_case , type=snake_case , required=snake_case , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=snake_case , type=snake_case , required=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=snake_case , default=snake_case , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=snake_case , default=snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=snake_case , type=snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=snake_case , default=snake_case , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=snake_case , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=snake_case , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=snake_case , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=snake_case , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=snake_case , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=snake_case , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=snake_case , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=snake_case , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=snake_case , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=snake_case , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=snake_case , type=snake_case , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=snake_case , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=snake_case , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=snake_case , type=snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
igf_data = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
secondary_learner = training_secondary_learner(
igf_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
model = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
train_dataset , test_dataset = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=True )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 465 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
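# Minimal usage sketch for the fast tokenizer defined above (assumes the
# `transformers` package is installed and the checkpoint is reachable):
#
#     from transformers import ConvBertTokenizerFast
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("ConvBERT mixes convolution and self-attention.", return_tensors="pt")
#     print(enc["input_ids"].shape)  # (1, sequence_length), includes [CLS]/[SEP]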
| 456 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
A__ : int = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=lowercase_ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=lowercase_ , default=5 )
parser.add_argument("""--batch_size""" , type=lowercase_ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=lowercase_ , default=1 )
parser.add_argument("""--freeze""" , type=lowercase_ , default=lowercase_ )
parser.add_argument("""--learning_rate""" , type=lowercase_ , default=5E-4 )
parser.add_argument("""--seed""" , type=lowercase_ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=lowercase_ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=lowercase_ , default=10 )
parser.add_argument("""--weight_decay""" , type=lowercase_ , default=0.01 )
parser.add_argument("""--output_dir""" , type=lowercase_ , default="""./results""" )
return parser.parse_args()
metric = load('accuracy')


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
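# Quick illustration of the metric logic above, with hypothetical values only:
# argmax over the logits, then accuracy via the `evaluate` metric loaded above.
_demo_logits = np.array([[0.1, 0.9], [0.8, 0.2]])
_demo_refs = np.array([1, 0])
assert metric.compute(predictions=np.argmax(_demo_logits, axis=1), references=_demo_refs) == {"accuracy": 1.0}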
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" ,
    )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics ,
    )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 456 | 1 |
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
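# Note on knapsack's return value: after the loops finish, the loop variable w_
# holds its final value w, so dp[n][w_] is the optimum for the full capacity w.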
def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 180 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels ,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = RegNetModelTester(self )
__magic_name__ :Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Dict = model_class(__lowerCAmelCase )
__magic_name__ :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :Optional[Any] = [*signature.parameters.keys()]
__magic_name__ :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def A ( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A ( self ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
__magic_name__ :int = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
__magic_name__ :Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ :List[str] = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__magic_name__ :Optional[Any] = layer_type
__magic_name__ :List[str] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ :List[str] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def A ( self ):
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :Optional[int] = RegNetModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCAmelCase )
__magic_name__ :List[str] = self.default_image_processor
__magic_name__ :Any = prepare_img()
__magic_name__ :Optional[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
__magic_name__ :int = model(**__lowerCAmelCase )
# verify the logits
__magic_name__ :List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :Any = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 180 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 372 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True ,
        )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
def a__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowercase : int = model_class_name.from_pretrained("distilbert-base-uncased" )
lowercase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
@require_flax
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def a__ ( self ) -> Union[str, Any]:
lowercase : str = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
lowercase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase : List[str] = model(a_ , attention_mask=a_ )[0]
lowercase : int = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a_ )
lowercase : List[str] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) )
| 372 | 1 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
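# Usage sketch for the class above (illustrative values only):
#
#     ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#     ps.get_sum(1, 3)               # -> 9  (2 + 3 + 4)
#     ps.contains_sum(6)             # -> True (the contiguous run 1 + 2 + 3)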
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self :Dict ):
torch.manual_seed(0 )
__lowerCamelCase : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase : List[Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Optional[Any] =CLIPTextModel(__lowercase )
__lowerCamelCase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase : Optional[int] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowercase ( self :Union[str, Any] , __lowercase :Optional[int] , __lowercase :str=0 ):
__lowerCamelCase : List[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCamelCase : str =image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCamelCase : Union[str, Any] =torch.manual_seed(__lowercase )
else:
__lowerCamelCase : Any =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCamelCase : Dict ={
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : int ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Tuple =self.get_dummy_components()
__lowerCamelCase : List[str] =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : List[Any] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : int =pipe(**__lowercase )
__lowerCamelCase : Dict =output.images
__lowerCamelCase : Union[str, Any] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[Any] =np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self :str ):
__lowerCamelCase : str =self.get_dummy_components()
for name, module in components.items():
if hasattr(__lowercase , '''half''' ):
__lowerCamelCase : Union[str, Any] =module.half()
__lowerCamelCase : int =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : List[str] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : Optional[int] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : Dict =pipe(**__lowercase )
__lowerCamelCase : List[str] =output.images
__lowerCamelCase : Dict =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : str =np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def __lowercase ( self :Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self :str ):
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self :Dict ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self :Dict ):
__lowerCamelCase : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__lowerCamelCase : Any =init_image.resize((512, 512) )
__lowerCamelCase : Optional[Any] ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : Optional[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : Optional[Any] =CycleDiffusionPipeline.from_pretrained(
__lowercase , scheduler=__lowercase , safety_checker=__lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Dict ='''A black colored car'''
__lowerCamelCase : Union[str, Any] ='''A blue colored car'''
__lowerCamelCase : Dict =torch.manual_seed(0 )
__lowerCamelCase : Tuple =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Tuple =output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : List[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__lowerCamelCase : Optional[Any] =init_image.resize((512, 512) )
__lowerCamelCase : Any ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : List[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : str =CycleDiffusionPipeline.from_pretrained(__lowercase , scheduler=__lowercase , safety_checker=__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Any ='''A black colored car'''
__lowerCamelCase : int ='''A blue colored car'''
__lowerCamelCase : Tuple =torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Dict =output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 363 | 0 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
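# Note: r above is the standard Harris corner response
# R = det(M) - k * trace(M)^2, where M is the windowed structure tensor
# [[sum(Ix^2), sum(Ix*Iy)], [sum(Ix*Iy), sum(Iy^2)]]; a large positive R
# marks a corner-like neighbourhood, while a strongly negative R marks an edge.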
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 39 |
from math import factorial
def combinations(n, k):
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
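# Quick sanity check (illustrative): C(5, 2) = 5! / (2! * 3!) = 10.
assert combinations(5, 2) == 10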
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 39 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=__a ):
__a : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *lowercase : str , **lowercase : str ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[Any] , *lowercase : Union[str, Any] , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Tuple , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Tuple = ["""flax"""]
def __init__( self : Dict , *lowercase : List[Any] , **lowercase : Any ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *lowercase : Any , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Tuple , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Optional[int] = ["""flax"""]
def __init__( self : Any , *lowercase : Any , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Optional[Any] = ["""flax"""]
def __init__( self : Dict , *lowercase : List[str] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : Tuple , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : List[str] , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Union[str, Any] = ["""flax"""]
def __init__( self : Optional[int] , *lowercase : Dict , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : int , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *lowercase : int , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : List[Any] = ["""flax"""]
def __init__( self : Optional[int] , *lowercase : Optional[Any] , **lowercase : int ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : int , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : str , **lowercase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : int = ["""flax"""]
def __init__( self : int , *lowercase : List[str] , **lowercase : int ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : Dict , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Tuple , *lowercase : Any , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Dict = ["""flax"""]
def __init__( self : Dict , *lowercase : Optional[int] , **lowercase : str ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : Tuple , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Any , *lowercase : List[Any] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[int] , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Any , *lowercase : Union[str, Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : str , *lowercase : List[Any] , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Union[str, Any] = ["""flax"""]
def __init__( self : Union[str, Any] , *lowercase : Tuple , **lowercase : str ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : List[str] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *lowercase : Any , **lowercase : Tuple ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : Any , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : Optional[Any] , *lowercase : List[str] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _a ( metaclass=__a ):
__a : Tuple = ["""flax"""]
def __init__( self : Optional[Any] , *lowercase : Union[str, Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def A ( cls : int , *lowercase : Tuple , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def A ( cls : List[str] , *lowercase : int , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
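# Editorial note on the pattern above: every placeholder class routes through
# requires_backends, which raises an informative ImportError when flax is not
# installed. A minimal sketch of the same idea (the names below are
# illustrative, not the actual diffusers/transformers implementation):
#
#     def requires_backends(obj, backends):
#         raise ImportError(f"{type(obj).__name__} requires {backends}")
#
#     class ExampleFlaxOnlyClass:
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["flax"])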
| 358 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id ,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def A ( self : Any , lowercase : Tuple , lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] , *lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
UpperCAmelCase = model(lowercase , token_type_ids=lowercase )
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str , lowercase : Optional[int] , *lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , *lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Tuple , lowercase : Dict , lowercase : Any , lowercase : Any , lowercase : Optional[int] , *lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def A ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : int , lowercase : List[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def A ( self : List[Any] , lowercase : int , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
UpperCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
UpperCAmelCase = inputs_dict['''labels''']
UpperCAmelCase = inputs_dict['''labels''']
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _a ( unittest.TestCase ):
@slow
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase )
UpperCAmelCase = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowercase ) # the president is
UpperCAmelCase = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 358 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates ,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = TFMBartModel(config=SCREAMING_SNAKE_CASE_).get_decoder()
lowercase__ : Optional[Any] = inputs_dict["""input_ids"""]
lowercase__ : List[str] = input_ids[:1, :]
lowercase__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
lowercase__ : List[str] = inputs_dict["""head_mask"""]
lowercase__ : Any = 1
# first forward pass
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : int = outputs.to_tuple()
lowercase__ : str = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        '''simple docstring'''
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
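# A note on the integration test above: it is gated behind @slow and downloads
# facebook/mbart-large-en-ro, so enable slow tests first (the test file path is illustrative):
#   RUN_SLOW=1 pytest tests/models/mbart/test_modeling_tf_mbart.py -k test_batch_generation_en_ro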
| 12 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
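# Dataset selection sketch: with --line_by_line every line of the input file becomes its own
# example (LineByLineTextDataset, or LineByLineWithRefDataset when a Chinese whole-word-mask
# reference file is supplied); otherwise TextDataset concatenates the corpus and slices it
# into contiguous blocks of `block_size` tokens.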
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
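# Example invocation (paths and model name are illustrative, not prescriptive):
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file ./data/train.txt \
#       --eval_data_file ./data/eval.txt \
#       --output_dir ./output \
#       --do_train --do_eval
# For masked-LM architectures such as BERT or RoBERTa, add --mlm (and optionally
# --whole_word_mask); for XLNet the permutation-LM collator is selected automatically.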
| 190 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
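# make_linear_from_emb builds the LM head by pointing a bias-free nn.Linear at the
# token-embedding tensor, so the output projection shares its weights with the input
# embeddings. A quick sanity check, assuming a converted XGLM model is in scope:
#   lm_head = make_linear_from_emb(model.model.embed_tokens)
#   assert lm_head.weight.shape == model.model.embed_tokens.weight.shape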
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing_keys = model.load_state_dict(state_dict, strict=False)
    print(missing_keys)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
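# Example invocation (the script and output directory names are illustrative):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-converted
# The dumped directory can then be reloaded with XGLMForCausalLM.from_pretrained("./xglm-converted").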
| 282 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    '''simple docstring'''
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0  # ensure a defined return value even if no full-resolution images were matched
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
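# Example (script name is illustrative; creates ./query_<term>/ in the working directory):
#   python download_images_from_google_query.py "blue sky"
# Note that this scrapes Google's HTML results page, so it is brittle: any change to the
# page markup or the AF_initDataCallback payload format will break the regexes above.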
| 282 | 1 |