| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
'''Project Euler problem 7: find the 10001st prime number.'''
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f'{solution() = }')
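# Editorial sanity check (not part of the original solution): is_prime() should
# recover the first six primes by simple filtering.
# >>> [n for n in range(2, 14) if is_prime(n)]
# [2, 3, 5, 7, 11, 13]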
'''Sentence-splitting helper: puts each sentence of the input on its own line.'''
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
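# Hedged usage example (assumes the punkt model downloaded above is available):
# add_newline_to_end_of_each_sentence('Hello world. How are you?')
# -> 'Hello world.\nHow are you?'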
'''BEiT model configuration.'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = 'beit'

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224,
                 patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False,
                 use_relative_position_bias=False, use_shared_relative_position_bias=False,
                 layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True,
                 out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
                 auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1,
                 auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
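# Hedged usage sketch (assumes this file sits inside the transformers package,
# so BeitConfig is importable): the defaults describe the base BEiT model.
# config = BeitConfig()
# assert config.hidden_size == 768 and config.patch_size == 16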
'''Fuzzy set operations (union, intersection, complement, etc.) on two triangular fuzzy sets.'''
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create the universe of discourse using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using built-in functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
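    # Editorial sketch for the two compositions named above (not in the
    # original script): for fuzzy relations R (m x n) and S (n x p),
    # max-min composition is T[i, j] = max_k min(R[i, k], S[k, j]);
    # max-product composition replaces min with multiplication.
    R = np.array([[0.2, 0.8], [0.6, 0.4]])
    S = np.array([[0.5, 0.9], [0.7, 0.3]])
    max_min_composition = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    max_product_composition = np.max(R[:, :, None] * S[None, :, :], axis=1)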
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
'''Unit tests for the 0/1 knapsack implementation.'''
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
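# Editorial worked check for the last case above (not part of the test suite):
# with capacity 50 and items (weight, value) = (10, 60), (20, 100), (30, 120),
# the optimal subset takes the 20- and 30-weight items: 100 + 120 = 220.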
'''Project Euler problem 8: largest product of 13 adjacent digits in the 1000-digit number below.'''
import sys

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def str_eval(s: str) -> int:
    """Return the product of the digits of s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # The window can only improve by sliding one digit forward.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f'{solution() = }')
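# Editorial cross-check (not part of the original solution): scan every
# 13-digit window directly; slower but obviously correct, so it can be used
# to validate the sliding-window shortcut in solution().
def brute_force_solution(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))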
'''Integration tests for the VersatileDiffusion "mega" pipeline.'''
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


RUN_EXTRA_TESTS = False  # module-level flag; this identifier is assumed (the original name was obfuscated)


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt',
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type='numpy',
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt',
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type='numpy',
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
'''Tests for the CodeGen tokenizer (slow and fast implementations).'''
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length',
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')

        # Simple input
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='np')

        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s['input_ids'])
        self.assertTrue(0 in out_s['attention_mask'])

        # s2
        # test automatic padding
        self.assertEqual(out_s2['input_ids'].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2['input_ids'][0])
        self.assertFalse(0 in out_s2['attention_mask'][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2['input_ids'][1])
        self.assertTrue(0 in out_s2['attention_mask'][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p['input_ids'])
        self.assertTrue(0 in out_p['attention_mask'])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2['input_ids'].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2['input_ids'][0])
        self.assertFalse(0 in out_p2['attention_mask'][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2['input_ids'][1])
        self.assertTrue(0 in out_p2['attention_mask'][1])

    def test_add_bos_token_slow(self):
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')

        text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b'

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ['^#', re.escape('<|endoftext|>'), "^'''", '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
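# Hedged usage sketch of the decode-time truncation exercised in
# test_truncation above (running it downloads the checkpoint named there):
#   tok = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
#   ids = tok.encode('x = 1\n\n\n\n# trailing comment')
#   tok.decode(ids, truncate_before_pattern=['^#', '\n\n\n'])  # -> 'x = 1'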
'''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.'''
import argparse
import logging
import os

import datasets
import tensorflow as tf
from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.'
    )
    parser.add_argument(
        '--dataset_name',
        type=str,
        default='wikitext',
        help='Name of the training. Explore datasets at: hf.co/datasets.',
    )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.'
    )
    parser.add_argument(
        '--tokenizer_name_or_path',
        type=str,
        default='sayakpaul/unigram-tokenizer-wikitext',
        help='Tokenizer identifier. Can be a local filepath or a Hub identifier.',
    )
    parser.add_argument(
        '--shard_size',
        type=int,
        default=1_000,
        help='Number of entries to go in a single shard.',
    )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit',
        default=None,
        type=int,
        help='Limit the number of shards (used for debugging).',
    )
    parser.add_argument(
        '--max_length',
        type=int,
        default=512,
        help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.',
    )
    parser.add_argument(
        '--output_dir',
        default='tf-tpu',
        type=str,
        help='Output directory where the TFRecord shards will be saved. If the'
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        ' shards will be directly saved to a Google Cloud Storage bucket.',
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['text'])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
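# Hedged sketch (an editorial addition, not part of the original script) of
# how one written shard could be read back and inspected:
#   raw = tf.data.TFRecordDataset('dataset-0-1000.tfrecord')
#   for record in raw.take(1):
#       example = tf.train.Example()
#       example.ParseFromString(record.numpy())
#       print(example.features.feature['input_ids'])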
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'Limiting the dataset to {args.limit} entries.')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, f'dataset-{shard_count}-{records_containing}.tfrecord')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f'split-{args.split}-records-count.txt', 'w') as f:
        print(f'Total {args.split} records: {total_records}', file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
'''FizzBuzz: multiples of 3 print Fizz, multiples of 5 print Buzz, both print FizzBuzz.'''


def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz from `number` up to and including `iterations`.
    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''Tests for the StableUnCLIP image-to-image pipeline.'''
import gc
import random
import unittest

import numpy as np
import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        # Note: the two boolean flags below were mangled in this dump; True is
        # assumed for both, matching the SD2-style UNet these tests build.
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type='projection',
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear',
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type='v_prediction',
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            'prompt': 'An anime racoon running a marathon',
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'np',
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(init_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(init_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png'
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image,
            'anime turtle',
            num_inference_steps=2,
            output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''Tests for the LayoutLMv3 image processor.'''
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True (the default)
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')

        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562,
        803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
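# Hedged usage sketch of the processor exercised above (assumes pytesseract is
# installed and 'document.png' is a local document image):
#   processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
#   enc = processor(Image.open('document.png').convert('RGB'), return_tensors='pt')
#   enc.words, enc.boxes  # OCR'd words and their bounding boxes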
| 294
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig ):
    model_type = "ernie_m"
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self , vocab_size = 25_0002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
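
# A minimal usage sketch for the config above:
#
#   config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
#   config.num_classes  # resolves to config.num_labels via attribute_map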
| 294
| 1
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self , dl_manager ):
'''simple docstring'''
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute(self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 294
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module ):
        def __init__(self , module , rank ):
            '''simple docstring'''
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
            small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )

        def forward(self , input , *args , **kwargs ):
            '''simple docstring'''
            return self.module(input , *args , **kwargs ) + self.adapter(input )
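
# A minimal sketch (illustrative shapes) of how LoRALayer above is used: it wraps an
# existing linear module and learns a low-rank additive bypass while the base stays frozen.
#
#   base = nn.Linear(64, 64)
#   lora = LoRALayer(base, rank=16)
#   out = lora(torch.randn(2, 64))  # frozen base output + trainable adapter output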
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    MAX_NEW_TOKENS = 10
    def setUp(self ):
        '''simple docstring'''
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
    def setUp(self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
    def tearDown(self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config = self.model_abit.config
        self.assertTrue(hasattr(config , 'quantization_config' ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=quantization_config , load_in_abit=True , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(ValueError ):
            # Tries casting with `.float()`
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            # Tries casting with `.half()`
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        self.model_fpaa = self.model_fpaa.to(torch.floataa )
        _ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=True , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
    def setUpClass(cls ):
        '''simple docstring'''
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = 'Translate in German: Hello, my dog is cute'
    def tearDown(self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
class __UpperCamelCase ( lowerCAmelCase_ ):
    def setUp(self ):
'''simple docstring'''
super().setUp()
# model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map='auto' )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map='auto' )
    def tearDown(self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
    def setUp(self ):
'''simple docstring'''
super().setUp()
    def tearDown(self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.pipe = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
    def setUp(self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
    def setUp(self ):
        '''simple docstring'''
        self.model_name = 'facebook/opt-350m'
        super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )

    def preprocess_img(self , images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__(self , text=None , images=None , **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['pixel_values'] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module ):
    def __init__(
        self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*' ) )
        if not len(paths ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(paths ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
        print(f"""gif saved to {output_path}""" )
    def _get_latent(self , path=None , img=None ):
        '''simple docstring'''
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        x = preprocess_vqgan(x )
        z , *_ = self.vqgan.encode(x )
        return z
    def _add_vector(self , transform_vector ):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity(self , prompts , image , weights=None ):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='pt' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self , pos_prompts , neg_prompts , image ):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'] , image , weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'] , image , weights=neg_prompts['weights'] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
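    # Note: the loss above is -log(pos_logits) + log(neg_logits), i.e. it rewards CLIP
    # similarity to the positive prompts and penalizes similarity to the negative ones.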
    def _optimize_CLIP(self , original_img , pos_prompts , neg_prompts ):
        '''simple docstring'''
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print('CLIP loss' , clip_loss )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging(self , positive_prompts , negative_prompts , image_path ):
        '''simple docstring'''
        wandb.init(reinit=True , project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log({'Original Image': wandb.Image(image )} )
    def process_prompts(self , prompts ):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(':' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
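    # Accepted prompt formats handled above (illustrative): a "|"-separated string with
    # optional ":"-weights, e.g. "a smiling face:1.5|blue eyes:0.5", a list of strings,
    # or a list of (prompt, weight) tuples; a missing weight defaults to 1.0.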
    def generate(self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 294
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig ):
    def __init__(self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , 'has_pre_transformation' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward(self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294
| 1
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
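    # A minimal sketch of the two compositions on small fuzzy relations (assuming
    # skfuzzy's fuzzymath helpers maxmin_composition/maxprod_composition; R1 and R2
    # are illustrative relation matrices):
    #
    #   R1 = np.array([[0.3, 0.7], [0.6, 0.2]])
    #   R2 = np.array([[0.5, 0.4], [0.9, 0.1]])
    #   max_min = fuzz.maxmin_composition(R1, R2)
    #   max_prod = fuzz.maxprod_composition(R1, R2)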
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f"""1 / {pow(temp + 1 , int(power ) )}""" if series else '1' )
    return series
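
# Example (illustrative): p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']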
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 294
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model , config , tf_weights=None ):
    """Build a map of TF checkpoint variable names to the corresponding PyTorch parameters.

    The TF-side names below follow the standard MobilenetV1 checkpoint layout
    (Conv2d_*/weights, depthwise_weights and BatchNorm/{beta,gamma,moving_mean,moving_variance}).
    """
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"""Loading TF weight {name} with shape {shape}""" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"""Importing {name}""" )
        if name not in tf_weights:
            logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
        logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + '/RMSProp' , None )
        tf_weights.pop(name + '/RMSProp_1' , None )
        tf_weights.pop(name + '/ExponentialMovingAverage' , None )
    logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
    return model
def apply_tf_padding(features , conv_layer ):
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
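
# Worked example (illustrative numbers) for the "SAME"-padding math above: with
# in_height=7, stride_height=2, kernel_height=3, we get
# pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top=1, pad_bottom=1.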
class MobileNetVaConvLayer(nn.Module ):
    def __init__(
        self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )
        if use_normalization:
            self.normalization = nn.BatchNormad(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self , features ):
        '''simple docstring'''
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self , module ):
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel(MobileNetVaPreTrainedModel ):
    def __init__(self , config , add_pooling_layer = True ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self , heads_to_prune ):
        '''simple docstring'''
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel ):
    def __init__(self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
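
# A minimal inference sketch for the classification head above (assuming the
# checkpoint named in _IMAGE_CLASS_CHECKPOINT; `image` is any PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
#   model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')
#   inputs = processor(images=image, return_tensors='pt')
#   predicted_class = model(**inputs).logits.argmax(-1).item()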
| 294
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ):
    with open(path , 'rb' ) as f:
        im = Image.open(f )
        return im.convert('RGB' )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None , metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self ):
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples ):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    labels = torch.tensor([example['labels'] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def lowerCamelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split )
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    labelaid , idalabel = {}, {}
    for i, label in enumerate(labels ):
        labelaid[label] = str(i )
        idalabel[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , labelaid=labelaid , idalabel=idalabel , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__a : List[Any] = image_processor.size['shortest_edge']
else:
__a : str = (image_processor.size['height'], image_processor.size['width'])
__a : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__a : List[Any] = Compose(
[
RandomResizedCrop(_SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__a : str = Compose(
[
Resize(_SCREAMING_SNAKE_CASE ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : Any = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_SCREAMING_SNAKE_CASE : int ):
__a : Optional[int] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__a : List[str] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__a : Tuple = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Initialize our trainer
__a : Optional[int] = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__a : Dict = None
if training_args.resume_from_checkpoint is not None:
__a : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : Any = last_checkpoint
__a : List[Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__a : str = trainer.evaluate()
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__a : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
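# --- Hedged sketch (appended, not part of the original script): the train/eval
# torchvision pipelines built above, isolated so they run standalone. The crop
# size and the mean/std values are illustrative assumptions; in the script they
# come from the checkpoint's image processor.
import numpy as np
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

_size = 224  # assumed; image_processor.size decides this in the script
_normalize = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
_train_tf = Compose([RandomResizedCrop(_size), RandomHorizontalFlip(), ToTensor(), _normalize])
_eval_tf = Compose([Resize(_size), CenterCrop(_size), ToTensor(), _normalize])

_img = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
print(_train_tf(_img).shape, _eval_tf(_img).shape)  # both torch.Size([3, 224, 224])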
| 294
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__lowercase : Optional[int] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
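# --- Hedged sketch: the download step above, isolated. It installs a
# browser-like User-Agent (some hosts reject urllib's default) and saves one
# URL to disk. The URL in the usage note is a placeholder assumption, not one
# scraped by the code above.
import os
import urllib.request

def download_image(url: str, dest_dir: str, filename: str) -> str:
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)
    os.makedirs(dest_dir, exist_ok=True)
    path = os.path.join(dest_dir, filename)
    urllib.request.urlretrieve(url, path)  # noqa: S310
    return path

# usage: download_image('https://example.com/cat.jpg', 'query_cats', 'img_0.jpg')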
| 294
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = tempfile.mkdtemp()
__a : List[str] = SamImageProcessor()
__a : int = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : List[Any] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_image_processor()
__a : Tuple = SamProcessor(image_processor=__a )
__a : Tuple = self.prepare_image_inputs()
__a : Union[str, Any] = image_processor(__a , return_tensors='np' )
__a : Tuple = processor(images=__a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_image_processor()
__a : List[str] = SamProcessor(image_processor=__a )
__a : Tuple = [torch.ones((1, 3, 5, 5) )]
__a : int = [[1764, 2646]]
__a : Optional[int] = [[683, 1024]]
__a : Union[str, Any] = processor.post_process_masks(__a , __a , __a )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : int = processor.post_process_masks(
__a , torch.tensor(__a ) , torch.tensor(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__a : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
__a : Optional[Any] = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(__a ):
__a : Optional[int] = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = tempfile.mkdtemp()
__a : Union[str, Any] = SamImageProcessor()
__a : str = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : List[str] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.get_image_processor()
__a : str = SamProcessor(image_processor=__a )
__a : List[Any] = self.prepare_image_inputs()
__a : Optional[Any] = image_processor(__a , return_tensors='np' )
__a : int = processor(images=__a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_image_processor()
__a : int = SamProcessor(image_processor=__a )
__a : Optional[int] = [tf.ones((1, 3, 5, 5) )]
__a : Optional[Any] = [[1764, 2646]]
__a : Union[str, Any] = [[683, 1024]]
__a : Any = processor.post_process_masks(__a , __a , __a , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : int = processor.post_process_masks(
__a , tf.convert_to_tensor(__a ) , tf.convert_to_tensor(__a ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__a : List[str] = [np.ones((1, 3, 5, 5) )]
__a : str = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : Optional[int] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__a : Optional[Any] = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors='tf' )
@require_vision
@require_torchvision
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = tempfile.mkdtemp()
__a : int = SamImageProcessor()
__a : Tuple = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : Any = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_image_processor()
__a : str = SamProcessor(image_processor=__a )
__a : List[str] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__a : Tuple = [tf.convert_to_tensor(__a )]
__a : Any = [torch.tensor(__a )]
__a : List[Any] = [[1764, 2646]]
__a : Optional[int] = [[683, 1024]]
__a : Optional[int] = processor.post_process_masks(
__a , __a , __a , return_tensors='tf' )
__a : int = processor.post_process_masks(
__a , __a , __a , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.get_image_processor()
__a : Dict = SamProcessor(image_processor=__a )
__a : Optional[Any] = self.prepare_image_inputs()
__a : Any = image_processor(__a , return_tensors='pt' )['pixel_values'].numpy()
__a : str = processor(images=__a , return_tensors='pt' )['pixel_values'].numpy()
__a : List[Any] = image_processor(__a , return_tensors='tf' )['pixel_values'].numpy()
__a : Tuple = processor(images=__a , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
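# --- Hedged sketch: conceptually, post_process_masks rescales the model's
# low-resolution masks back to each image's original size (1764x2646 in the
# tests above). Below is a nearest-neighbour numpy analogue of that resizing
# step only; the real method additionally removes padding and interpolates.
import numpy as np

def upscale_mask(mask, out_h, out_w):
    in_h, in_w = mask.shape
    rows = np.arange(out_h) * in_h // out_h  # source row for each output row
    cols = np.arange(out_w) * in_w // out_w  # source column for each output column
    return mask[rows[:, None], cols[None, :]]

small = np.random.rand(5, 5) > 0.5
print(upscale_mask(small, 1764, 2646).shape)  # (1764, 2646)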
| 294
|
'''simple docstring'''
import os
def lowerCamelCase ():
with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file:
__a : List[Any] = str(file.readlines()[0] )
__a : str = names.replace('"' , '' ).split(',' )
names.sort()
__a : Union[str, Any] = 0
__a : Tuple = 0
for i, name in enumerate(_SCREAMING_SNAKE_CASE ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
__a : Any = 0
return total_score
if __name__ == "__main__":
print(solution())
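# --- Hedged sketch: the same name-score computation on inline data, with no
# file dependency. Score = (1-based alphabetical rank) * sum of letter values
# (A=1, B=2, ...).
def total_name_score(names):
    total = 0
    for rank, name in enumerate(sorted(names), start=1):
        total += rank * sum(ord(ch) - 64 for ch in name)
    return total

print(total_name_score(['MARY', 'PATRICIA', 'LINDA']))  # 1*40 + 2*57 + 3*77 = 385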
| 294
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowercase : List[Any] = TypeVar('KEY')
__lowercase : str = TypeVar('VAL')
@dataclass(frozen=lowerCAmelCase_ , slots=lowerCAmelCase_ )
class __UpperCamelCase ( Generic[KEY, VAL] ):
A_ = 42
A_ = 42
class __UpperCamelCase ( _Item ):
def __init__( self ):
'''simple docstring'''
super().__init__(None , None )
def __bool__( self ):
'''simple docstring'''
return False
__lowercase : Union[str, Any] = _DeletedItem()
class __UpperCamelCase ( MutableMapping[KEY, VAL] ):
def __init__( self , __a = 8 , __a = 0.75 ):
'''simple docstring'''
__a : Dict = initial_block_size
__a : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__a : Optional[Any] = capacity_factor
__a : str = 0
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return hash(__a ) % len(self._buckets )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self._buckets[ind]
if not stored:
__a : Union[str, Any] = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
__a : Optional[Any] = _Item(__a , __a )
return True
else:
return False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__a : str = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[int] = self._buckets
__a : int = [None] * new_size
__a : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Dict = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
__a : Optional[int] = self._get_next_ind(__a )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self , __a , __a ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self , __a ):
'''simple docstring'''
for ind in self._iterate_buckets(__a ):
__a : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
__a : Tuple = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __a ):
'''simple docstring'''
for ind in self._iterate_buckets(__a ):
__a : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__a : Any = ', '.join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 294
|
'''simple docstring'''
__lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__lowercase : List[str] = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = start
# add current to visited
visited.append(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(_SCREAMING_SNAKE_CASE )
# if all vertices haven't been visited select a new one to visit
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
__lowercase : Union[str, Any] = topological_sort('a', [], [])
print(sort)
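# --- Hedged sketch: the same DFS idea with an explicit visited set and a
# single result list. Like the code above, a node is appended only after all of
# its neighbours, so the output lists dependencies before dependents; reverse
# it if you want sources first.
def topo_sort(adjacency):
    visited = set()
    order = []

    def dfs(node):
        visited.add(node)
        for neighbor in adjacency[node]:
            if neighbor not in visited:
                dfs(neighbor)
        order.append(node)  # only after every descendant is placed

    for vertex in adjacency:
        if vertex not in visited:
            dfs(vertex)
    return order

print(topo_sort({'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}))
# ['c', 'd', 'e', 'b', 'a']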
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not number >= 1:
raise ValueError(
'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__a : Dict = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase : str = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
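# --- Hedged sketch: the idea behind _LazyModule, reduced to a helper. Instead
# of importing every submodule eagerly, resolve an attribute by importing only
# the submodule that defines it (this mirrors PEP 562 module __getattr__, which
# _LazyModule builds on). The json example is just an illustration.
import importlib

def lazy_getattr(package, import_structure, name):
    for module_name, attrs in import_structure.items():
        if name in attrs:
            module = importlib.import_module(f'{package}.{module_name}')
            return getattr(module, name)
    raise AttributeError(name)

print(lazy_getattr('json', {'decoder': ['JSONDecoder']}, 'JSONDecoder'))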
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Tuple = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
import functools
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ):
# Validation
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('All days elements should be less than 366' )
__a : List[Any] = set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
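# --- Hedged sketch: the same memoised recursion with explicit names plus a
# worked call. A non-travel day costs nothing; a travel day takes the cheapest
# of a 1-, 7- or 30-day pass starting there.
import functools

def min_ticket_cost(days, costs):
    travel = set(days)

    @functools.cache
    def best(day):
        if day > 365:
            return 0
        if day not in travel:
            return best(day + 1)
        return min(
            costs[0] + best(day + 1),
            costs[1] + best(day + 7),
            costs[2] + best(day + 30),
        )

    return best(1)

print(min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11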
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'laion/clap-htsat-unfused'
__a : Optional[Any] = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_tokenizer()
__a : List[str] = self.get_feature_extractor()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
__a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : int = self.get_tokenizer()
__a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : int = floats_list((3, 1000) )
__a : str = feature_extractor(__a , return_tensors='np' )
__a : int = processor(audios=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_feature_extractor()
__a : Any = self.get_tokenizer()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Union[str, Any] = 'This is a test string'
__a : Union[str, Any] = processor(text=__a )
__a : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : str = self.get_tokenizer()
__a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Optional[int] = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_feature_extractor()
__a : Optional[int] = self.get_tokenizer()
__a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 294
| 1
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase ():
__a : str = 9
__a : List[str] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__a : Tuple = kruskal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(_SCREAMING_SNAKE_CASE )
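# --- Hedged sketch: a minimal Kruskal with union-find, matching the
# [u, v, weight] edge format above; the imported kruskal() is assumed to behave
# like this. Sort edges by weight and keep each one that joins two different
# components.
def kruskal_mst(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst

print(kruskal_mst(4, [[0, 1, 1], [1, 2, 2], [0, 2, 3], [2, 3, 4]]))
# [[0, 1, 1], [1, 2, 2], [2, 3, 4]]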
| 294
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a : Dict = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
__lowercase : List[Any] = [8, 5, 9, 7]
__lowercase : Optional[int] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowercase : int = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __UpperCamelCase :
def __init__( self , __a , __a , __a , ):
'''simple docstring'''
__a : Tuple = claim_vector
__a : Tuple = allocated_resources_table
__a : List[str] = maximum_claim_table
def __UpperCAmelCase ( self ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {self.__need().index(i ): i for i in self.__need()}
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
__a : Optional[int] = self.__need()
__a : Any = self.__allocated_resources_table
__a : Union[str, Any] = self.__available_resources()
__a : Any = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a : Dict = False
for each_need in need_list:
__a : str = True
for index, need in enumerate(__a ):
if need > available_resources[index]:
__a : Union[str, Any] = False
break
if execution:
__a : Dict = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a : Dict = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__a )
# update available/freed resources stack
__a : Optional[Any] = np.array(__a ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__a ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__a ) + 1}"""
+ ' '.join(f"""{it:>8}""" for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__a ) + 1}"""
+ ' '.join(f"""{it:>8}""" for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__a ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
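# --- Hedged sketch: the safety check at the heart of the algorithm above, run
# on the first two processes of the tables. A state is safe if every process
# can eventually finish when granted its remaining need (maximum - allocated)
# from the currently free resources; each finished process returns its
# allocation to the pool.
def is_safe_state(claim, alloc, maximum):
    need = [[m - a for m, a in zip(max_row, alloc_row)]
            for max_row, alloc_row in zip(maximum, alloc)]
    available = [c - sum(col) for c, col in zip(claim, zip(*alloc))]
    finished = [False] * len(alloc)
    progressed = True
    while progressed:
        progressed = False
        for i, done in enumerate(finished):
            if not done and all(n <= a for n, a in zip(need[i], available)):
                available = [a + x for a, x in zip(available, alloc[i])]
                finished[i] = True
                progressed = True
    return all(finished)

print(is_safe_state([8, 5, 9, 7], [[2, 0, 1, 1], [0, 1, 2, 1]], [[3, 2, 1, 4], [0, 2, 5, 2]]))
# True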
| 294
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ):
for key, value in kwargs.items():
os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : Tuple = value
return destination
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ):
if port is None:
__a : List[str] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
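# --- Hedged sketch: the environment-patching context manager above, written
# out and exercised. Variables are set (upper-cased) for the duration of the
# `with` block and removed afterwards; like the original, prior values are not
# restored.
import os
from contextlib import contextmanager

@contextmanager
def patch_env(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            os.environ.pop(key.upper(), None)

with patch_env(master_port=29_500):
    print(os.environ['MASTER_PORT'])  # 29500
print('MASTER_PORT' in os.environ)  # False (assuming it was unset beforehand)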
| 294
| 1
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
class __UpperCamelCase :
A_ = 42
A_ = None
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self , __a , __a , __a , **__a ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
return f"""`pip install {cls.pip_package or cls.name}`"""
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "optuna"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_optuna_available()
def __UpperCAmelCase ( self , __a , __a , __a , **__a ):
'''simple docstring'''
return run_hp_search_optuna(__a , __a , __a , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return default_hp_space_optuna(__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ray"
A_ = "'ray[tune]'"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_ray_available()
def __UpperCAmelCase ( self , __a , __a , __a , **__a ):
'''simple docstring'''
return run_hp_search_ray(__a , __a , __a , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return default_hp_space_ray(__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "sigopt"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_sigopt_available()
def __UpperCAmelCase ( self , __a , __a , __a , **__a ):
'''simple docstring'''
return run_hp_search_sigopt(__a , __a , __a , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return default_hp_space_sigopt(__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "wandb"
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
return is_wandb_available()
def __UpperCAmelCase ( self , __a , __a , __a , **__a ):
'''simple docstring'''
return run_hp_search_wandb(__a , __a , __a , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return default_hp_space_wandb(__a )
__lowercase : Optional[Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase ():
__a : Optional[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_SCREAMING_SNAKE_CASE ) > 0:
__a : int = available_backends[0].name
if len(_SCREAMING_SNAKE_CASE ) > 1:
logger.info(
F"""{len(_SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 294
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[float] , _SCREAMING_SNAKE_CASE : Tuple ):
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_SCREAMING_SNAKE_CASE ):
print(F"""{i}\t\t{d}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[dict[str, int]] , _SCREAMING_SNAKE_CASE : list[float] , _SCREAMING_SNAKE_CASE : int ):
for j in range(_SCREAMING_SNAKE_CASE ):
__a , __a , __a : List[str] = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[dict[str, int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
__a : List[str] = [float('inf' )] * vertex_count
__a : Union[str, Any] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
__a , __a , __a : Optional[int] = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
__a : Optional[Any] = distance[u] + w
__a : Dict = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : Optional[Any] = int(input('Enter number of vertices: ').strip())
__lowercase : List[str] = int(input('Enter number of edges: ').strip())
__lowercase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__lowercase , __lowercase , __lowercase : Any = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__lowercase : List[Any] = {'src': src, 'dst': dest, 'weight': weight}
__lowercase : Dict = int(input('\nEnter shortest path source:').strip())
__lowercase : int = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
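# --- Hedged sketch: the same relaxation loop on a tiny hard-coded graph, so
# the function above can be sanity-checked without the interactive prompts.
def bellman_ford_demo():
    edges = [
        {'src': 0, 'dst': 1, 'weight': 4},
        {'src': 0, 'dst': 2, 'weight': 5},
        {'src': 1, 'dst': 2, 'weight': -3},
    ]
    vertex_count, source = 3, 0
    dist = [float('inf')] * vertex_count
    dist[source] = 0.0
    for _ in range(vertex_count - 1):  # V-1 relaxation passes suffice
        for e in edges:
            u, v, w = e['src'], e['dst'], e['weight']
            if dist[u] != float('inf') and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    return dist

print(bellman_ford_demo())  # [0.0, 4.0, 1.0]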
| 294
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = None
A_ = None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ):
# Validation
def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
_SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
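# --- Hedged sketch: a self-contained miniature of the validator above, with
# two example trees (the node class here is illustrative, not the one defined
# above).
from dataclasses import dataclass

@dataclass
class _Node:
    data: float
    left: '_Node | None' = None
    right: '_Node | None' = None

def _is_bst(node, lo=float('-inf'), hi=float('inf')):
    if node is None:
        return True
    return (
        lo < node.data < hi
        and _is_bst(node.left, lo, node.data)
        and _is_bst(node.right, node.data, hi)
    )

print(_is_bst(_Node(2.0, _Node(1.0), _Node(3.0))))  # True
print(_is_bst(_Node(2.0, _Node(5.0), _Node(3.0))))  # False: 5 sits left of 2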
| 294
| 1
|
'''simple docstring'''
__lowercase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowercase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowercase : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
__a : Any = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 10 , _SCREAMING_SNAKE_CASE : int = 22 ):
__a : List[str] = range(1 , _SCREAMING_SNAKE_CASE )
__a : int = range(1 , _SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
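# Illustrative check (added for clarity; not part of the original record):
# why 22 is a safe power bound -- base**power has exactly `power` digits only
# while base < 10, and even 9**22 already falls one digit short.
assert len(str(9**21)) == 21  # a 21-digit 21st power: still counted
assert len(str(9**22)) == 21  # only 21 digits, so no 22nd power can qualify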
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    __a : str = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE )  # remove pegasus newline char (keep the result; it was discarded before)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
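# Illustrative usage (added for clarity; not part of the original record),
# guarded so it only runs when nltk is installed; the exact split assumes the
# default punkt tokenizer behaviour:
if NLTK_AVAILABLE:
    assert lowerCamelCase('Hello world. How are you?') == 'Hello world.\nHow are you?'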
| 294
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : int = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gptj"
A_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __a=5_0400 , __a=2048 , __a=4096 , __a=28 , __a=16 , __a=64 , __a=None , __a="gelu_new" , __a=0.0 , __a=0.0 , __a=0.0 , __a=1E-5 , __a=0.02 , __a=True , __a=5_0256 , __a=5_0256 , __a=False , **__a , ):
'''simple docstring'''
__a : Union[str, Any] = vocab_size
__a : Optional[int] = n_positions
__a : Any = n_embd
__a : str = n_layer
__a : Union[str, Any] = n_head
__a : Optional[int] = n_inner
__a : Dict = rotary_dim
__a : Dict = activation_function
__a : Dict = resid_pdrop
__a : Union[str, Any] = embd_pdrop
__a : List[Any] = attn_pdrop
__a : List[Any] = layer_norm_epsilon
__a : Dict = initializer_range
__a : List[str] = use_cache
__a : Dict = bos_token_id
__a : Tuple = eos_token_id
super().__init__(
bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = "default" , __a = None , __a = False , ):
'''simple docstring'''
super().__init__(__a , task=__a , patching_specs=__a , use_past=__a )
if not getattr(self._config , 'pad_token_id' , __a ):
# TODO: how to do that better?
__a : List[str] = 0
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
__a : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self._config.n_head
def __UpperCAmelCase ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ):
'''simple docstring'''
__a : List[str] = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
# We need to order the input in the way they appears in the forward()
__a : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__a , __a : Dict = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__a : List[str] = seqlen + 2
__a : Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__a : Optional[int] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
__a : Union[str, Any] = common_inputs['attention_mask']
if self.use_past:
__a : List[str] = ordered_inputs['attention_mask'].dtype
__a : Union[str, Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 13
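# Illustrative sketch (added for clarity; not part of the original record):
# each dummy past key/value tensor built above has shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
def _past_key_value_shape(batch: int, num_heads: int, past_len: int, hidden_size: int) -> tuple:
    return (batch, num_heads, past_len, hidden_size // num_heads)


assert _past_key_value_shape(2, 16, 10, 4096) == (2, 16, 10, 256)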
| 294
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowercase : Any = [0, 25, 50]
__lowercase : int = [25, 50, 75]
__lowercase : List[str] = fuzz.membership.trimf(X, abca)
__lowercase : Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowercase : List[Any] = np.ones(75)
__lowercase : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A') = (1 - µA(x))
__lowercase : str = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
__lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__lowercase : Optional[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowercase : str = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
__lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
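# Illustrative sketch (added for clarity; not part of the original record):
# the same basic set operations in plain numpy, with two *distinct*
# triangular membership functions (the obfuscated record above reuses a
# single triple, `abca`, for both sets).
def _trimf(x, abc):
    # Triangular membership: rises on [a, b], falls on [b, c].
    a, b, c = abc
    return np.maximum(np.minimum((x - a) / (b - a), (c - x) / (c - b)), 0.0)


_x = np.linspace(0, 75, 75)
_young = _trimf(_x, [0.0, 25.0, 50.0])
_middle_aged = _trimf(_x, [25.0, 50.0, 75.0])
_union = np.maximum(_young, _middle_aged)         # max(µA(x), µB(x))
_intersection = np.minimum(_young, _middle_aged)  # min(µA(x), µB(x))
_complement = 1.0 - _young                        # 1 - µA(x)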
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Optional[Any] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
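# Illustrative sketch (added for clarity; not part of the original record):
# the core idea behind _LazyModule is that attribute access triggers the real
# import only on first use. This is a simplified stand-in, not the actual
# transformers implementation.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # Only reached when the attribute is not already set: import lazily.
        module = importlib.import_module(self._symbol_to_module[symbol])
        return getattr(module, symbol)


assert _LazyDemo('demo', {'json': ['dumps']}).dumps({'a': 1}) == '{"a": 1}'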
| 294
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
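# Illustrative cross-check (added for clarity; not part of the original
# record): a brute-force sliding window over a digit string, useful for
# sanity-checking the skip-ahead optimisation above on small inputs.
def _largest_window_product(digits: str, width: int) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for ch in digits[i : i + width]:
            product *= int(ch)
        best = max(best, product)
    return best


assert _largest_window_product('3675356291', 5) == 3150  # 6*7*5*3*5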
| 294
| 1
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowercase : List[Any] = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__lowercase : Any = f'''https://www.google.com/search?q={query}&num=100'''
__lowercase : Optional[Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__lowercase : List[str] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__lowercase : Tuple = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 294
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
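# Illustrative sketch (added for clarity; not part of the original record):
# a conceptual restatement of what `truncate_before_pattern` (exercised in
# the slow test above) does: cut the decoded text at the earliest regex
# match. The real tokenizer implementation differs in detail. Reuses the
# record's `re` import.
def _truncate_before_first_match(text, patterns):
    starts = [m.start() for p in patterns for m in [re.search(p, text, re.MULTILINE)] if m]
    return text[: min(starts)] if starts else text


assert _truncate_before_first_match('if a > b: pass\n# trailing', ['^#']) == 'if a > b: pass\n'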
| 294
| 1
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "encodec"
def __init__( self , __a=[1.5, 3.0, 6.0, 12.0, 24.0] , __a=2_4000 , __a=1 , __a=False , __a=None , __a=None , __a=128 , __a=32 , __a=1 , __a=[8, 5, 4, 2] , __a="weight_norm" , __a=7 , __a=7 , __a=3 , __a=2 , __a=True , __a="reflect" , __a=2 , __a=2 , __a=1.0 , __a=1024 , __a=None , __a=True , **__a , ):
'''simple docstring'''
__a : Union[str, Any] = target_bandwidths
__a : List[str] = sampling_rate
__a : Dict = audio_channels
__a : List[str] = normalize
__a : Union[str, Any] = chunk_length_s
__a : List[str] = overlap
__a : Optional[int] = hidden_size
__a : int = num_filters
__a : Dict = num_residual_layers
__a : Tuple = upsampling_ratios
__a : Union[str, Any] = norm_type
__a : List[Any] = kernel_size
__a : Any = last_kernel_size
__a : Any = residual_kernel_size
__a : Any = dilation_growth_rate
__a : str = use_causal_conv
__a : Dict = pad_mode
__a : Any = compress
__a : Optional[int] = num_lstm_layers
__a : Tuple = trim_right_ratio
__a : Tuple = codebook_size
__a : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
__a : Dict = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__a )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
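# Illustrative check (added for clarity; not part of the original record):
# with the defaults above (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2])
# the hop length is prod([8, 5, 4, 2]) = 320 samples per frame, giving a
# frame rate of ceil(24000 / 320) = 75 frames per second.
assert math.ceil(24_000 / int(np.prod([8, 5, 4, 2]))) == 75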
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not number >= 1:
raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__a : Dict = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
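# Illustrative restatement (added for clarity; not part of the original
# record), with distinct parameter names; the obfuscated signature above
# collapses both parameters into one:
def _fizz_buzz(start: int, iterations: int) -> str:
    out = ''
    number = start
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += ' '
    return out


assert _fizz_buzz(1, 15).split() == [
    '1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz',
    '11', 'Fizz', '13', '14', 'FizzBuzz',
]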
| 294
| 1
|
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __UpperCamelCase :
def __init__( self , __a , __a=99 , __a=13 , __a=7 , __a=9 , __a=True , __a=True , __a=False , __a=32 , __a=5 , __a=4 , __a=37 , __a=8 , __a=0.1 , __a=0.002 , __a=1 , __a=0 , __a=0 , __a=None , __a=None , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : Optional[int] = batch_size
__a : str = encoder_seq_length
__a : Union[str, Any] = decoder_seq_length
# For common tests
__a : List[Any] = self.decoder_seq_length
__a : Optional[Any] = is_training
__a : int = use_attention_mask
__a : int = use_labels
__a : List[Any] = vocab_size
__a : Any = hidden_size
__a : Any = num_hidden_layers
__a : List[str] = num_attention_heads
__a : int = d_ff
__a : str = relative_attention_num_buckets
__a : int = dropout_rate
__a : Optional[int] = initializer_factor
__a : Dict = eos_token_id
__a : Tuple = pad_token_id
__a : List[Any] = decoder_start_token_id
__a : Optional[int] = None
__a : Tuple = decoder_layers
def __UpperCAmelCase ( self ):
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self , __a , __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , ):
'''simple docstring'''
if attention_mask is None:
__a : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a : Optional[int] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__a )
if decoder_head_mask is None:
__a : str = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__a )
if cross_attn_head_mask is None:
__a : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a : int = input_ids.clamp(self.pad_token_id + 1 )
__a : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a : Tuple = self.get_config()
__a : Union[str, Any] = config.num_attention_heads
__a : Any = self.prepare_inputs_dict(__a , __a , __a )
return config, input_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , ):
'''simple docstring'''
__a : str = UMTaModel(config=__a )
model.to(__a )
model.eval()
__a : Any = model(
input_ids=__a , decoder_input_ids=__a , attention_mask=__a , decoder_attention_mask=__a , )
__a : Optional[Any] = model(input_ids=__a , decoder_input_ids=__a )
__a : Union[str, Any] = result.last_hidden_state
__a : Dict = result.past_key_values
__a : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , ):
'''simple docstring'''
__a : str = UMTaModel(config=__a ).get_decoder().to(__a ).eval()
# first forward pass
__a : str = model(__a , use_cache=__a )
__a : Tuple = model(__a )
__a : List[str] = model(__a , use_cache=__a )
self.parent.assertTrue(len(__a ) == len(__a ) )
self.parent.assertTrue(len(__a ) == len(__a ) + 1 )
__a , __a : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__a : Any = model(__a )['last_hidden_state']
__a : Any = model(__a , past_key_values=__a )['last_hidden_state']
# select random slice
__a : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a : Dict = output_from_no_past[:, -1, random_slice_idx].detach()
__a : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def __UpperCAmelCase ( self , __a , __a , ):
'''simple docstring'''
__a : Union[str, Any] = UMTaModel(config=__a ).to(__a ).half().eval()
__a : Optional[Any] = model(**__a )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__a ).any().item() )
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = True
A_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ = [0.8, 0.9]
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
__a : int = UMTaModel(config_and_inputs[0] ).to(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__a , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
__a : str = config_and_inputs[0]
__a : List[str] = UMTaForConditionalGeneration(__a ).eval()
model.to(__a )
__a : str = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=__a ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
}
for attn_name, (name, mask) in zip(__a , head_masking.items() ):
__a : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_heads , device=__a )
__a : Any = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=__a , return_dict_in_generate=__a , **__a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=__a ).to(__a )
__a : Optional[Any] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=__a , legacy=__a )
__a : List[str] = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__a : Any = tokenizer(__a , return_tensors='pt' , padding=__a ).input_ids
# fmt: off
__a : str = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__a , __a )
__a : Any = model.generate(input_ids.to(__a ) )
__a : Union[str, Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__a : Dict = tokenizer.batch_decode(__a )
self.assertEqual(__a , __a )
| 294
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
__a : List[Any] = len(_SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
__a : Optional[Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__a : List[str] = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )]
# Reverse whole list
__a : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
__lowercase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__lowercase : Any = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
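# Illustrative restatement (added for clarity; not part of the original
# record), tracing the max-flip strategy: bring the largest remaining item to
# the front, then flip it into its final position.
def _pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))       # locate the max of the prefix
        arr = arr[mi::-1] + arr[mi + 1 :]     # flip it to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]  # flip it to position cur - 1
        cur -= 1
    return arr


assert _pancake_sort([3, 1, 2]) == [1, 2, 3]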
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase ():
__a : Union[str, Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
__a : List[Any] = 6
__a : Dict = 1
__a : str = 1_901
__a : Dict = 0
while year < 2_001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
__a : int = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
__a : List[str] = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
__a : Union[str, Any] = day - days_per_month[month - 2]
if month > 12:
year += 1
__a : List[Any] = 1
if year < 2_001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
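# Illustrative cross-check (added for clarity; not part of the original
# record): the standard library agrees that 171 months in 1901-2000 start on
# a Sunday (Project Euler 19).
from datetime import date

assert sum(
    1
    for year in range(1_901, 2_001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
) == 171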
| 294
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : int = module
__a : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
__a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
return self.module(__a , *__a , **__a ) + self.adapter(__a )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(__a ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(__a ):
# Tries with a cast to float
self.model_abit.float()
with self.assertRaises(__a ):
# Tries with a cast to half
self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__a : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__a : str = LoRALayer(module.q_proj , rank=16 )
__a : str = LoRALayer(module.k_proj , rank=16 )
__a : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a : int = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 1_000 ):
__a : Any = 3
__a : Tuple = 0
while a < n:
# a multiple of 15 is already a multiple of 3, so a single combined test suffices
if a % 3 == 0 or a % 5 == 0:
result += a
a += 1
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
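# A closed-form alternative to the loop above (a minimal sketch; the helper
# name `tri` is illustrative and not part of the original solution):
def tri(k: int) -> int:
    # triangular number: 1 + 2 + ... + k
    return k * (k + 1) // 2

def solution_closed_form(n: int = 1_000) -> int:
    # inclusion-exclusion: multiples of 3, plus multiples of 5, minus
    # multiples of 15 (which the first two terms count twice)
    m = n - 1
    return 3 * tri(m // 3) + 5 * tri(m // 5) - 15 * tri(m // 15)

assert solution_closed_form() == 233_168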
| 294
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = None
A_ = None
A_ = None
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Any = project_dim
__a : Optional[Any] = pooler_fn
__a : int = learn_encoder
__a : str = use_attention_mask
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = [r"pooler", r"logit_scale"]
A_ = [r"position_ids", r"predictions.decoder.bias"]
A_ = "roberta"
A_ = RobertaSeriesConfig
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[Any] = XLMRobertaModel(__a )
__a : str = nn.Linear(config.hidden_size , config.project_dim )
__a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a )
if self.has_pre_transformation:
__a : int = nn.Linear(config.hidden_size , config.project_dim )
__a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Tuple = self.base_model(
input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , )
if self.has_pre_transformation:
__a : Optional[Any] = outputs['hidden_states'][-2]
__a : Optional[int] = self.pre_LN(__a )
__a : Union[str, Any] = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__a : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
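# Note on the pre-transformation branch above: with output_hidden_states=True,
# `hidden_states` holds the embedding output plus one tensor per layer, so
# index -2 is the penultimate transformer layer; CLIP-style text encoders
# sometimes project this layer instead of the final one.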
| 294
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=[30, 30] , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=3 , __a=None , __a=8 , __a=10 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : List[str] = image_size
__a : Dict = patch_size
__a : Optional[int] = num_channels
__a : str = is_training
__a : Tuple = use_labels
__a : Union[str, Any] = hidden_size
__a : List[Any] = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : Any = intermediate_size
__a : Union[str, Any] = hidden_act
__a : int = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Union[str, Any] = type_sequence_label_size
__a : List[Any] = initializer_range
__a : Optional[int] = num_labels
__a : Tuple = scope
__a : Tuple = n_targets
__a : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__a : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__a : Optional[int] = num_patches + 1 + self.num_detection_tokens
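# e.g. with the defaults above (image_size=[30, 30], patch_size=2,
# num_detection_tokens=10): num_patches = 15 * 15 = 225, so
# expected_seq_len = 225 + 1 + 10 = 236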
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__a : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__a : str = []
for i in range(self.batch_size ):
__a : int = {}
__a : Dict = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__a )
__a : Dict = torch.rand(self.n_targets , 4 , device=__a )
labels.append(__a )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Union[str, Any] = YolosModel(config=__a )
model.to(__a )
model.eval()
__a : List[str] = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : str = YolosForObjectDetection(__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(pixel_values=__a )
__a : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__a : Tuple = model(pixel_values=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.prepare_config_and_inputs()
__a , __a , __a : List[str] = config_and_inputs
__a : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
A_ = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__a : Union[str, Any] = []
for i in range(self.model_tester.batch_size ):
__a : int = {}
__a : Any = torch.ones(
size=(self.model_tester.n_targets,) , device=__a , dtype=torch.long )
__a : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=__a , dtype=torch.float )
labels.append(__a )
__a : List[Any] = labels
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = YolosModelTester(self )
__a : Optional[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Union[str, Any] = model_class(__a )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
# in YOLOS, the seq_len is different
__a : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__a : Tuple = True
__a : Tuple = False
__a : Optional[Any] = True
__a : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : Optional[int] = model(**self._prepare_for_class(__a , __a ) )
__a : int = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[int] = True
__a : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : int = model(**self._prepare_for_class(__a , __a ) )
__a : List[Any] = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__a : Optional[Any] = len(__a )
# Check attention is always last and order is fine
__a : Union[str, Any] = True
__a : Optional[Any] = True
__a : Optional[int] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(__a , __a ) )
__a : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(__a ) )
__a : Any = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : Union[str, Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : Dict = model(**self._prepare_for_class(__a , __a ) )
__a : Tuple = outputs.hidden_states
__a : List[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
# YOLOS has a different seq_length
__a : List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[int] = YolosModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(__a )
__a : Union[str, Any] = self.default_image_processor
__a : Dict = prepare_img()
__a : Tuple = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
__a : List[str] = model(inputs.pixel_values )
# verify outputs
__a : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[int] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__a , )
__a : Optional[Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1E-4 ) )
# verify postprocessing
__a : int = image_processor.post_process_object_detection(
__a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__a : str = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__a )
__a : List[Any] = [75, 75, 17, 63, 17]
__a : Optional[Any] = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , __a , atol=1E-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , __a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , __a ) )
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
class __UpperCamelCase :
def __init__( self , __a , __a=None , __a=None ):
'''simple docstring'''
__a : Any = data
__a : List[Any] = previous
__a : Optional[int] = next_node
def __str__( self ):
'''simple docstring'''
return f"""{self.data}"""
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.data
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.next
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.previous
class __UpperCamelCase :
def __init__( self , __a ):
'''simple docstring'''
__a : Optional[int] = head
def __iter__( self ):
'''simple docstring'''
return self
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
__a : Dict = self.current.get_data()
__a : Tuple = self.current.get_next()
return value
class __UpperCamelCase :
def __init__( self ):
'''simple docstring'''
__a : Union[str, Any] = None # First node in list
__a : Tuple = None # Last node in list
def __str__( self ):
'''simple docstring'''
__a : Dict = self.head
__a : Optional[int] = []
while current is not None:
nodes.append(current.get_data() )
__a : Dict = current.get_next()
return " ".join(str(__a ) for node in nodes )
def __contains__( self , __a ):
'''simple docstring'''
__a : Dict = self.head
while current:
if current.get_data() == value:
return True
__a : Union[str, Any] = current.get_next()
return False
def __iter__( self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.head is None:
__a : List[str] = node
__a : List[str] = node
else:
self.insert_before_node(self.head , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.head is None:
self.set_head(__a )
else:
self.insert_after_node(self.tail , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : int = Node(__a )
if self.head is None:
self.set_head(__a )
else:
self.set_tail(__a )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Optional[int] = node
__a : Tuple = node.previous
if node.get_previous() is None:
__a : Any = node_to_insert
else:
__a : Optional[int] = node_to_insert
__a : Tuple = node_to_insert
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : int = node
__a : Any = node.next
if node.get_next() is None:
__a : Optional[Any] = node_to_insert
else:
__a : Any = node_to_insert
__a : List[str] = node_to_insert
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Union[str, Any] = 1
__a : Tuple = Node(__a )
__a : Union[str, Any] = self.head
while node:
if current_position == position:
self.insert_before_node(__a , __a )
return
current_position += 1
__a : Tuple = node.next
self.insert_after_node(self.tail , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = self.head
while node:
if node.get_data() == item:
return node
__a : Dict = node.get_next()
raise Exception('Node not found' )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if (node := self.get_node(__a )) is not None:
if node == self.head:
__a : Optional[Any] = self.head.get_next()
if node == self.tail:
__a : Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(__a )
@staticmethod
def __UpperCAmelCase ( __a ):
'''simple docstring'''
if node.get_next():
__a : Optional[int] = node.previous
if node.get_previous():
__a : Any = node.next
__a : List[Any] = None
__a : Tuple = None
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.head is None
def lowerCamelCase ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
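# A self-contained sketch of the insert-before pointer surgery used by the
# list above (names here are illustrative, not the original method names):
class _Node:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

def insert_before(head, node, new):
    # rewire previous <-> new <-> node, returning the (possibly new) head
    new.next = node
    new.previous = node.previous
    if node.previous is None:
        head = new  # inserting in front of the current head
    else:
        node.previous.next = new
    node.previous = new
    return head

head = _Node(2)
head = insert_before(head, head, _Node(1))
assert head.data == 1 and head.next.data == 2 and head.next.previous is head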
| 294
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowercase : str = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'MobileNetV1Config'
# Base docstring
__lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
__lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
__lowercase : int = 'google/mobilenet_v1_1.0_224'
__lowercase : Any = 'tabby, tabby cat'
__lowercase : Dict = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
__a : Dict = {}
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[Any] = model.mobilenet_va
else:
__a : List[Any] = model
__a : Dict = 'MobilenetV1/Conv2d_0/'
__a : Dict = backbone.conv_stem.convolution.weight
__a : Optional[Any] = backbone.conv_stem.normalization.bias
__a : int = backbone.conv_stem.normalization.weight
__a : int = backbone.conv_stem.normalization.running_mean
__a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a : int = i + 1
__a : Dict = i * 2
__a : Dict = backbone.layer[pt_index]
__a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a : Union[str, Any] = pointer.convolution.weight
__a : Optional[Any] = pointer.normalization.bias
__a : Union[str, Any] = pointer.normalization.weight
__a : List[Any] = pointer.normalization.running_mean
__a : Tuple = pointer.normalization.running_var
__a : List[str] = backbone.layer[pt_index + 1]
__a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a : Optional[int] = pointer.convolution.weight
__a : List[str] = pointer.normalization.bias
__a : Dict = pointer.normalization.weight
__a : Dict = pointer.normalization.running_mean
__a : Optional[int] = pointer.normalization.running_var
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__a : Optional[int] = model.classifier.weight
__a : List[Any] = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = array
# Build TF to PyTorch weights loading map
__a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__a : Union[str, Any] = array.squeeze().transpose()
else:
__a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
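# A minimal shape check for the weight transposes above (illustrative, not
# part of the conversion function):
import numpy as np
tf_pointwise = np.zeros((1, 1, 32, 64))  # TF conv kernel: (H, W, in, out)
assert np.transpose(tf_pointwise, (3, 2, 0, 1)).shape == (64, 32, 1, 1)  # PyTorch: (out, in, H, W)
tf_depthwise = np.zeros((3, 3, 32, 1))  # TF depthwise kernel: (H, W, in, multiplier)
assert np.transpose(tf_depthwise, (2, 3, 0, 1)).shape == (32, 1, 3, 3)  # PyTorch grouped-conv layout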
def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ):
__a , __a : Any = features.shape[-2:]
__a , __a : int = conv_layer.stride
__a , __a : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__a : int = max(kernel_height - stride_height , 0 )
else:
__a : int = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a : Any = max(kernel_width - stride_width , 0 )
else:
__a : str = max(kernel_width - (in_width % stride_width) , 0 )
__a : int = pad_along_width // 2
__a : Dict = pad_along_width - pad_left
__a : List[str] = pad_along_height // 2
__a : Union[str, Any] = pad_along_height - pad_top
__a : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 )
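# Quick numeric check of the "SAME"-padding rule above: with in_height=7,
# stride_height=2, kernel_height=3, the height is not a multiple of the
# stride, so pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top=1
# and pad_bottom=1 -- matching TensorFlow's SAME padding for an output height of 4.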
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a : Union[str, Any] = nn.Convad(
in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , )
if use_normalization:
__a : List[str] = nn.BatchNormad(
num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , )
else:
__a : Tuple = None
if use_activation:
if isinstance(__a , __a ):
__a : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __a ):
__a : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__a : Dict = config.hidden_act
else:
__a : List[Any] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config.tf_padding:
__a : Union[str, Any] = apply_tf_padding(__a , self.convolution )
__a : Union[str, Any] = self.convolution(__a )
if self.normalization is not None:
__a : str = self.normalization(__a )
if self.activation is not None:
__a : Optional[int] = self.activation(__a )
return features
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = MobileNetVaConfig
A_ = load_tf_weights_in_mobilenet_va
A_ = "mobilenet_v1"
A_ = "pixel_values"
A_ = False
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = True ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[int] = config
__a : str = 32
__a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a : Union[str, Any] = MobileNetVaConvLayer(
__a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , )
__a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a : Any = nn.ModuleList()
for i in range(13 ):
__a : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=1 , ) )
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__a : Union[str, Any] = self.conv_stem(__a )
__a : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a : List[str] = layer_module(__a )
if output_hidden_states:
__a : List[Any] = all_hidden_states + (hidden_states,)
__a : str = hidden_states
if self.pooler is not None:
__a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 )
else:
__a : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=__a , )
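# For reference, the depthwise/pointwise pairs built in __init__ above follow
# the standard depthwise separable convolution pattern (a minimal sketch;
# shapes and names here are illustrative, not taken from this model):
import torch
from torch import nn

depthwise = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32)  # per-channel spatial filtering
pointwise = nn.Conv2d(32, 64, kernel_size=1)  # 1x1 cross-channel mixing
x = torch.randn(1, 32, 56, 56)
assert pointwise(depthwise(x)).shape == (1, 64, 56, 56)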
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Tuple = config.num_labels
__a : Tuple = MobileNetVaModel(__a )
__a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a )
__a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : int = self.classifier(self.dropout(__a ) )
__a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = 'single_label_classification'
else:
__a : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__a : Optional[Any] = MSELoss()
if self.num_labels == 1:
__a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__a : List[str] = CrossEntropyLoss()
__a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Tuple = BCEWithLogitsLoss()
__a : Optional[int] = loss_fct(__a , __a )
if not return_dict:
__a : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
| 294
| 1
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__lowercase : Optional[int] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase : List[Any] = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ['PerceiverFeatureExtractor']
__lowercase : Optional[int] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
'''simple docstring'''
import os
def lowerCamelCase ():
with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
__a : List[Any] = str(file.readlines()[0] )
__a : str = names.replace('"' , '' ).split(',' )
names.sort()
__a : Union[str, Any] = 0
__a : Tuple = 0
for i, name in enumerate(names ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
__a : Any = 0
return total_score
if __name__ == "__main__":
print(solution())
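# Worked example (from the Project Euler problem statement): COLIN has a
# letter score of 3 + 15 + 12 + 9 + 14 = 53; at position 938 in the sorted
# list it contributes 938 * 53 = 49714 to the total.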
| 294
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__a , )
assert hasattr(self , 'env' )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = {
'enabled': True,
'processes_per_host': 8,
}
__a : str = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__a : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__a : Tuple = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version='py36' , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
TrainingJobAnalytics(__a ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
__a : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a : str = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __a )
| 294
|
'''simple docstring'''
__lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__lowercase : List[str] = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = start
# add current to visited
visited.append(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(_SCREAMING_SNAKE_CASE )
# if not all vertices have been visited, select a new one to visit
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
__lowercase : Union[str, Any] = topological_sort('a', [], [])
print(sort)
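# Note: the DFS above appends a vertex only after all of its descendants, so
# the returned list is in *reverse* topological order (['c', 'd', 'e', 'b',
# 'a'] for the sample graph); reverse it to obtain a conventional topological
# ordering.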
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[int | float] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(_SCREAMING_SNAKE_CASE )
or left < -len(_SCREAMING_SNAKE_CASE )
or right >= len(_SCREAMING_SNAKE_CASE )
or right < -len(_SCREAMING_SNAKE_CASE )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
__a : int = (left + right) >> 1 # the middle
__a : Union[str, Any] = find_max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # find max in range[left, mid]
__a : Union[str, Any] = find_max(_SCREAMING_SNAKE_CASE , mid + 1 , _SCREAMING_SNAKE_CASE ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
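# Example usage (illustrative): the recursion splits [left, right] at the
# midpoint and keeps the larger of the two half-maxima.
# >>> find_max([3, 9, 4, 1], 0, 3)
# 9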
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
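# The bitwise test works because the least-significant bit of every even
# number is 0: e.g. 6 -> 0b110 & 1 == 0 (even), 7 -> 0b111 & 1 == 1 (odd).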
| 294
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __UpperCAmelCase ( self ):
'''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
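# Minimal usage sketch outside the test harness (checkpoint and step count are
# assumptions): pair a UNetaDModel with KarrasVeScheduler inside KarrasVePipeline,
# then call pipe(num_inference_steps=..., generator=torch.manual_seed(0),
# output_type='numpy') and read the generated batch from the `.images` attribute.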
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
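# Note on the pattern above: the _LazyModule instance registered in sys.modules
# stands in for this package, so a name like DistilBertModel is only imported
# (pulling in torch) on first attribute access rather than at import time.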
| 294
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = '▁'
# Segments (not really needed)
__lowercase : Dict = 0
__lowercase : Optional[int] = 1
__lowercase : Any = 2
__lowercase : List[str] = 3
__lowercase : int = 4
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = "left"
A_ = XLNetTokenizer
def __init__( self , __a=None , __a=None , __a=False , __a=True , __a=False , __a="<s>" , __a="</s>" , __a="<unk>" , __a="<sep>" , __a="<pad>" , __a="<cls>" , __a="<mask>" , __a=["<eop>", "<eod>"] , **__a , ):
'''simple docstring'''
__a : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
vocab_file=__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , )
__a : Any = 3
__a : Optional[int] = do_lower_case
__a : Optional[int] = remove_space
__a : Optional[Any] = keep_accents
__a : Union[str, Any] = vocab_file
__a : Tuple = False if not self.vocab_file else True
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : Dict = [self.sep_token_id]
__a : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : List[str] = [self.sep_token_id]
__a : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
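    # Illustrative layout (token contents assumed): for a pair (A, B) the ids
    # come out as len(A + sep) * [0] + len(B + sep) * [1] + [2], the trailing 2
    # marking the <cls> token that XLNet appends at the very end of the sequence.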
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Union[str, Any] = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
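# Sketch of a combined call (inputs illustrative): ClapProcessor routes keyword
# arguments to its children, e.g. processor(text=['a dog barking'],
# audios=floats_list((1, 1000)), return_tensors='np') returns the tokenizer
# fields together with the feature extractor's input features.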
| 294
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
A_ = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "A folder containing the training data."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "A folder containing the validation data."} )
A_ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
A_ = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
A_ = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = {}
if self.train_dir is not None:
__a : int = self.train_dir
if self.validation_dir is not None:
__a : Optional[int] = self.validation_dir
__a : Optional[int] = data_files if data_files else None
@dataclass
class __UpperCamelCase :
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase_ )} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
A_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "Name or path of preprocessor config."} )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Stride to use for the encoder."} , )
class __UpperCamelCase :
def __init__( self , __a=192 , __a=32 , __a=4 , __a=0.6 ):
'''simple docstring'''
__a : Optional[int] = input_size
__a : str = mask_patch_size
__a : Optional[int] = model_patch_size
__a : Optional[int] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
__a : int = self.input_size // self.mask_patch_size
__a : Union[str, Any] = self.mask_patch_size // self.model_patch_size
__a : List[str] = self.rand_size**2
__a : Any = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
'''simple docstring'''
__a : List[Any] = np.random.permutation(self.token_count )[: self.mask_count]
__a : int = np.zeros(self.token_count , dtype=__a )
__a : Union[str, Any] = 1
__a : Optional[int] = mask.reshape((self.rand_size, self.rand_size) )
__a : Tuple = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
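    # Worked example (default values assumed): input_size=192, mask_patch_size=32,
    # model_patch_size=4 and mask_ratio=0.6 give rand_size == 6, scale == 8,
    # token_count == 36 and mask_count == ceil(36 * 0.6) == 22, so the returned
    # flat mask has (6 * 8) ** 2 == 2304 entries, 22 * 64 of them set to one.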
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : List[Any] = torch.stack([example['pixel_values'] for example in examples] )
__a : Optional[Any] = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCamelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a : List[Any] = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__a : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__a : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__a : List[str] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__a : List[str] = ds['train'].train_test_split(data_args.train_val_split )
__a : Optional[Any] = split['train']
__a : int = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : Optional[Any] = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__a : Dict = AutoConfig.from_pretrained(model_args.config_name_or_path , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__a : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
__a : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_SCREAMING_SNAKE_CASE , 'decoder_type' ):
__a : str = 'simmim'
# adapt config
__a : Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
__a : List[str] = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__a : List[str] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__a : Dict = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
__a : int = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
__a : Tuple = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__a : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__a : Tuple = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__a : Union[str, Any] = AutoModelForMaskedImageModeling.from_config(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
__a : List[str] = ds['train'].column_names
else:
__a : int = ds['validation'].column_names
if data_args.image_column_name is not None:
__a : Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
__a : Any = 'image'
elif "img" in column_names:
__a : Optional[int] = 'img'
else:
__a : int = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__a : List[Any] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__a : Optional[Any] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Initialize our trainer
__a : Any = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__a : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
__a : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : Dict = last_checkpoint
__a : Optional[Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__a : str = trainer.evaluate()
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__a : Optional[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
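# Example invocation (arguments are illustrative; adjust dataset and paths):
# python run_mim.py --dataset_name cifar10 --output_dir ./simmim-out \
#     --do_train --do_eval --overwrite_output_dir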
| 294
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def __UpperCAmelCase ( self , result ):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 294
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def lowerCamelCase (obj ):
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts (source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
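# Illustrative behaviour (inputs assumed): nested dicts merge recursively while
# scalar values overwrite, e.g.
# merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'y': 2, 'x': 1}}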
def lowerCamelCase (port : int = None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
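# Usage note (port value assumed): connect_ex returns 0 when something already
# listens on the port, so a True result means "in use"; with no argument the
# function probes the default distributed launch port 29500.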
| 294
| 1
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
        image_count = lowerCamelCase(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__( self , key = 0 ):
        '''simple docstring'''
        self.__key = key
    def encrypt( self , content , key ):
        '''simple docstring'''
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ):
        '''simple docstring'''
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ):
        '''simple docstring'''
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ):
        '''simple docstring'''
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('encrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('decrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
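    # Round-trip sketch (key value assumed): since (c ^ k) ^ k == c, e.g.
    # XORCipher().decrypt_string(XORCipher().encrypt_string('hallo welt', 67), 67)
    # returns 'hallo welt' unchanged.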
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 294
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ):
# Validation
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) )
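# Illustrative check (tree values assumed): a root TreeNode(2.0) with children
# TreeNode(1.0) and TreeNode(3.0) satisfies the strict bound
# left_bound < node.data < right_bound at every level, so the check passes.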
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 294
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
__a : Any = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
| 294
| 1
|
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
A_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
__a : List[str] = VideoClassificationPipeline(model=__a , image_processor=__a , top_k=2 )
__a : Tuple = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
for example in examples:
__a : Union[str, Any] = video_classifier(__a )
self.assertEqual(
__a , [
{'score': ANY(__a ), 'label': ANY(__a )},
{'score': ANY(__a ), 'label': ANY(__a )},
] , )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
        video_classifier = pipeline(
            'video-classification' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        __a : str = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(__a , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
__a : Any = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    __a : str = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
| 294
| 1
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_80_22
FR_CODE = 12_80_28
@require_sentencepiece
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = MaMaaaTokenizer
A_ = False
A_ = False
A_ = True
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<s>' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
__a : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2, 3, 4, 5, 6] , )
__a : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
__a : Optional[int] = tokenizer.convert_tokens_to_string(__a )
self.assertEqual(__a , 'This is a test' )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
A_ = "facebook/m2m100_418M"
A_ = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
A_ = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
A_ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
__a : str = 1
return cls
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.tokenizer.get_vocab()
self.assertEqual(len(__a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'en'
__a : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
__a : Tuple = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
__a : List[str] = self.tokenizer.decode(__a , skip_special_tokens=__a )
__a : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tempfile.mkdtemp()
__a : List[str] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__a )
__a : Optional[Any] = MaMaaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.lang_token_to_id , __a )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = 'en'
__a : Optional[int] = 'fr'
__a : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors='pt' )
__a : Optional[int] = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
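        # shift_tokens_right builds decoder_input_ids from the labels: each row
        # starts with eos (2) followed by the target-language code (asserted below)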
for k in batch:
__a : List[str] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__a : Optional[int] = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__a : int = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} , )
| 294
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
__lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
    __lowercase : Any = [0, 25, 50]
    __lowercase : int = [25, 50, 75]
    __lowercase : List[str] = fuzz.membership.trimf(X, abca)
    __lowercase : Any = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
__lowercase : List[Any] = np.ones(75)
__lowercase : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    __lowercase : str = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    __lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    __lowercase : Optional[Any] = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    __lowercase : str = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    __lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    __lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
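    # Sketch of the two compositions named above (an illustrative addition, not
    # part of the original script): treat the pairwise minima of the membership
    # vectors as fuzzy relations R1, R2 on X x X and compose them with numpy.
    R1 = np.fmin.outer(young, middle_aged)
    R2 = np.fmin.outer(middle_aged, young)
    # max-min composition: T[i, k] = max_j min(R1[i, j], R2[j, k])
    max_min_composition = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
    # max-product composition: T[i, k] = max_j (R1[i, j] * R2[j, k])
    max_product_composition = np.max(R1[:, :, None] * R2[None, :, :], axis=1)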
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 294
| 1
|
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): # picklable for multiprocessing
return x.sum()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): # picklable for multiprocessing
return i + 1
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = 42
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = {}
__a : str = []
__a : Union[str, Any] = 1
__a : Tuple = [1, 2]
__a : Tuple = {'a': 1, 'b': 2}
__a : str = {'a': [1, 2], 'b': [3, 4]}
__a : Optional[int] = {'a': {'1': 1}, 'b': 2}
__a : List[str] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__a : Any = {}
__a : Tuple = []
__a : List[str] = 2
__a : Optional[Any] = [2, 3]
__a : Tuple = {'a': 2, 'b': 3}
__a : str = {'a': [2, 3], 'b': [4, 5]}
__a : Any = {'a': {'1': 2}, 'b': 3}
__a : int = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
__a : Optional[Any] = 2
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
__a : Dict = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
__a : Optional[int] = {'a': 2, 'b': 0, 'c': 2}
__a : List[str] = {
'a': np.eye(2 ).astype(__a ),
'b': np.zeros(3 ).astype(__a ),
'c': np.ones(2 ).astype(__a ),
}
self.assertEqual(map_nested(__a , __a , map_numpy=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__a , __a , map_numpy=__a , num_proc=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a , num_proc=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__a ): # can't pickle a local lambda
map_nested(lambda __a : x + 1 , __a , num_proc=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {'a': 1, 'b': 2}
__a : Optional[Any] = {'a': 3, 'b': 4}
__a : Tuple = {'a': 5, 'b': 6}
__a : Union[str, Any] = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__a , __a , __a ) ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
class __UpperCamelCase :
A_ = "bar"
__a : List[Any] = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(__a , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
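# below parallel_min_length (16) map_nested should stay single-process; at or
# above it, num_proc is capped by the length of the iterable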
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
__a : Optional[Any] = {F"""{i}""": i for i in range(_SCREAMING_SNAKE_CASE )}
__a : List[Any] = map_nested(lambda _SCREAMING_SNAKE_CASE : x + 10 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCamelCase ( lowerCAmelCase_ ):
@require_tf
def __UpperCAmelCase ( self ):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
__a : Tuple = layers.Dense(2 )
def gen_random_output():
__a : Union[str, Any] = tf.random.uniform((1, 3) )
return model(__a ).numpy()
with temp_seed(42 , set_tensorflow=__a ):
__a : str = gen_random_output()
with temp_seed(42 , set_tensorflow=__a ):
__a : Any = gen_random_output()
__a : Union[str, Any] = gen_random_output()
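        # both seeded runs must produce identical outputs; the unseeded third run must differ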
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
import torch
def gen_random_output():
__a : List[Any] = torch.nn.Linear(3 , 2 )
__a : Union[str, Any] = torch.rand(1 , 3 )
return model(__a ).detach().numpy()
with temp_seed(42 , set_pytorch=__a ):
__a : List[Any] = gen_random_output()
with temp_seed(42 , set_pytorch=__a ):
__a : List[Any] = gen_random_output()
__a : int = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
__a : Dict = gen_random_output()
with temp_seed(42 ):
__a : Dict = gen_random_output()
__a : Optional[Any] = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
__a : Union[str, Any] = NestedDataStructure(_SCREAMING_SNAKE_CASE ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
__a : str = NestedDataStructure(_SCREAMING_SNAKE_CASE ).flatten()
assert output == expected_output
def lowerCamelCase ():
__a : Optional[int] = A(x=1 , y='foobar' )
__a : Union[str, Any] = {'x': 1, 'y': 'foobar'}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
__a : Dict = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
__a : Optional[Any] = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
with pytest.raises(_SCREAMING_SNAKE_CASE ):
asdict([1, A(x=10 , y='foo' )] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
return text.split()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowerCamelCase ():
with Pool(2 ) as pool:
__a : Dict = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
__a : Dict = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
__a : Dict = []
for yield_time, content in iflatmap_unordered(
_SCREAMING_SNAKE_CASE , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(_SCREAMING_SNAKE_CASE )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(_SCREAMING_SNAKE_CASE ) == 4
| 294
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
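# e.g. str_eval('1234') returns 1 * 2 * 3 * 4 == 24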
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
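        # slide the window by one while the incoming digit is at least as large as
        # the digit being dropped; otherwise score the window and jump 13 ahead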
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : int = module
__a : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
__a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
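        # the down-projection gets small random weights and the up-projection is
        # zero-initialised, so the adapter initially adds nothing to the output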
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
return self.module(__a , *__a , **__a ) + self.adapter(__a )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected
# Therefore here we use only bloom-1b3 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
        with self.assertRaises(__a ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(__a ):
            # Tries casting with `float()`
            self.model_abit.float()
        with self.assertRaises(__a ):
            # Tries casting with `half()`
            self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
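        # `_keep_in_fp32_modules` is cleared above and restored at the end of the
        # test, to check that 4-bit loading and generation work without it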
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__a : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__a : str = LoRALayer(module.q_proj , rank=16 )
__a : str = LoRALayer(module.k_proj , rank=16 )
__a : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a : int = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
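        # decoding truncates the text before the first match of any pattern above
        # (comment marker, EOS token, docstring quotes, or a run of blank lines)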
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
| 1
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__a : Dict = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
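# illustrative example (not from the original): calling the function with
# number=1, iterations=15 returns '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '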
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
__a : Dict = FileLock(str(tmpdir / 'foo.lock' ) )
__a : Dict = FileLock(str(tmpdir / 'foo.lock' ) )
__a : Optional[int] = 0.0_1
    with locka.acquire():
        with pytest.raises(Timeout ):
            __a : Optional[Any] = time.time()
            lockb.acquire(timeout )
assert time.time() - _start > timeout
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
__a : Optional[int] = 'a' * 1_000 + '.lock'
__a : List[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(_SCREAMING_SNAKE_CASE )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__a : Optional[int] = FileLock(tmpdir / filename )
with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 294
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 1
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str="attention" ):
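    # t5x checkpoints stack the weights of all layers along axis 1; pick layer i
    # and flatten the (heads, head_dim) axes of each projection into a 2-D matrix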
__a : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
__a : Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__a : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
__a : str = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__a : Optional[int] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
__a : Dict = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__a : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
__a : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int=False ):
if split_mlp_wi:
__a : List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
__a : List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        __a : Optional[Any] = (wi_a, wi_b)
else:
__a : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
__a : Dict = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : dict , *, _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool , _SCREAMING_SNAKE_CASE : bool = False ):
__a : Optional[Any] = traverse_util.flatten_dict(variables['target'] )
__a : Optional[Any] = {'/'.join(_SCREAMING_SNAKE_CASE ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a : Tuple = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , _SCREAMING_SNAKE_CASE )
__a : int = collections.OrderedDict()
# Shared embeddings.
__a : List[Any] = old['token_embedder/embedding']
# Encoder.
for i in range(_SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
__a : List[str] = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_attention_layer_norm' )
__a , __a , __a , __a : int = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'attention' )
__a : Optional[Any] = layer_norm
__a : Tuple = k.T
__a : Optional[Any] = o.T
__a : Optional[int] = q.T
__a : Optional[int] = v.T
# Block i, layer 1 (MLP).
__a : Optional[int] = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , 'pre_mlp_layer_norm' )
__a , __a : Any = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' , _SCREAMING_SNAKE_CASE )
__a : Dict = layer_norm
if split_mlp_wi:
__a : Optional[Any] = wi[0].T
__a : Optional[Any] = wi[1].T
else:
__a : List[Any] = wi.T
__a : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__a : Optional[int] = tax_relpos_bias_lookup(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encoder' ).T
__a : List[Any] = old['encoder/encoder_norm/scale']
if not scalable_attention:
__a : str = tax_relpos_bias_lookup(
_SCREAMING_SNAKE_CASE , 0 , 'encoder' ).T
__a : Any = tax_relpos_bias_lookup(
_SCREAMING_SNAKE_CASE , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(_SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
__a : Dict = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_self_attention_layer_norm' )
__a , __a , __a , __a : Any = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'self_attention' )
__a : Any = layer_norm
__a : List[str] = k.T
__a : Dict = o.T
__a : List[Any] = q.T
__a : Dict = v.T
# Block i, layer 1 (Cross Attention).
__a : int = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_cross_attention_layer_norm' )
__a , __a , __a , __a : Optional[Any] = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'encoder_decoder_attention' )
__a : List[str] = layer_norm
__a : List[str] = k.T
__a : Optional[int] = o.T
__a : Optional[int] = q.T
__a : Tuple = v.T
# Block i, layer 2 (MLP).
__a : Any = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , 'pre_mlp_layer_norm' )
__a , __a : List[str] = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = layer_norm
if split_mlp_wi:
__a : Dict = wi[0].T
__a : Optional[int] = wi[1].T
else:
__a : str = wi.T
__a : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__a : Optional[int] = tax_relpos_bias_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decoder' ).T
__a : Tuple = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a : List[Any] = old['decoder/logits_dense/kernel'].T
return new
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : bool ):
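    # torch.from_numpy shares memory with the source array, so copy() first to
    # detach the tensors from the original checkpoint buffers.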
__a : List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a : Optional[Any] = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a : str = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__a : Tuple = state_dict['shared.weight']
return state_dict
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
__a : str = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE )
__a : str = convert_tax_to_pytorch(
_SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=_SCREAMING_SNAKE_CASE , scalable_attention=_SCREAMING_SNAKE_CASE )
__a : List[Any] = make_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , ):
__a : Tuple = MTaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a : Union[str, Any] = UMTaEncoderModel(_SCREAMING_SNAKE_CASE )
else:
__a : Any = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Verify that we can load the checkpoint.
model.from_pretrained(_SCREAMING_SNAKE_CASE )
print('Done' )
if __name__ == "__main__":
__lowercase : Any = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--is_encoder_only', action='store_true', help='Whether the model is an encoder-only model.', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
__lowercase : List[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
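# Example invocation (hypothetical script name and paths, for illustration only):
# python convert_umt5_checkpoint.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_dump \
#     --scalable_attention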
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
| 294
| 1
|
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
'''simple docstring'''
__a : List[str] = parent
__a : Union[str, Any] = config_class
__a : int = has_text_modality
__a : List[str] = kwargs
__a : List[str] = common_properties
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.config_class(**self.inputs_dict )
__a : int = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__a , __a ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(__a ):
try:
setattr(__a , __a , __a )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=f"""`{name} value {idx} expected, but was {getattr(__a , __a )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__a ):
try:
__a : List[Any] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=f"""`{name} value {idx} expected, but was {getattr(__a , __a )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
__a : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : str = os.path.join(__a , 'config.json' )
config_first.to_json_file(__a )
__a : Optional[Any] = self.config_class.from_json_file(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__a )
__a : int = self.config_class.from_pretrained(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.config_class(**self.inputs_dict )
__a : int = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
__a : Dict = os.path.join(__a , __a )
config_first.save_pretrained(__a )
__a : str = self.config_class.from_pretrained(__a , subfolder=__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__a : List[str] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.config_class.is_composition:
return
__a : Optional[int] = self.config_class()
self.parent.assertIsNotNone(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = copy.deepcopy(__a )
__a : Union[str, Any] = self.config_class(**__a )
__a : Tuple = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(__a , __a ) != value:
wrong_values.append((key, getattr(__a , __a ), value) )
if len(__a ) > 0:
__a : List[Any] = '\n'.join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 294
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : int = module
__a : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
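        # Scaled-normal init for the down-projection; the up-projection starts at
        # zero so the adapter initially leaves the wrapped module's output unchanged.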
__a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
return self.module(__a , *__a , **__a ) + self.adapter(__a )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside the setUp function
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b7 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(__a ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(__a ):
            # Tries casting with `float()`
self.model_abit.float()
with self.assertRaises(__a ):
            # Tries casting with `half()`
self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__a : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__a : str = LoRALayer(module.q_proj , rank=16 )
__a : str = LoRALayer(module.k_proj , rank=16 )
__a : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a : int = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCAmelCase_ ):
A_ = ["transformers", "torch", "note_seq"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 294
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = None
A_ = None
A_ = None
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Any = project_dim
__a : Optional[Any] = pooler_fn
__a : int = learn_encoder
__a : str = use_attention_mask
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = [r"pooler", r"logit_scale"]
A_ = [r"position_ids", r"predictions.decoder.bias"]
A_ = "roberta"
A_ = RobertaSeriesConfig
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[Any] = XLMRobertaModel(__a )
__a : str = nn.Linear(config.hidden_size , config.project_dim )
__a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a )
if self.has_pre_transformation:
__a : int = nn.Linear(config.hidden_size , config.project_dim )
__a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Tuple = self.base_model(
input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , )
if self.has_pre_transformation:
__a : Optional[Any] = outputs['hidden_states'][-2]
__a : Optional[int] = self.pre_LN(__a )
__a : Union[str, Any] = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__a : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : List[str] = {'vocab_file': 'spiece.model'}
__lowercase : str = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=False , __a=True , __a=False , __a="<s>" , __a="</s>" , __a="<unk>" , __a="<sep>" , __a="<pad>" , __a="<cls>" , __a="<mask>" , __a=["<eop>", "<eod>"] , __a = None , **__a , ):
'''simple docstring'''
__a : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__a : Union[str, Any] = 3
__a : Dict = do_lower_case
__a : List[str] = remove_space
__a : Any = keep_accents
__a : Tuple = vocab_file
__a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
__a : List[Any] = jieba
__a : List[str] = str.maketrans(' \n' , '\u2582\u2583' )
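        # Map spaces and newlines to the placeholder glyphs \u2582 and \u2583 so
        # they survive tokenization; _decode below reverses the substitution.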
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__a : str = self.__dict__.copy()
__a : int = None
return state
def __setstate__( self , __a ):
'''simple docstring'''
__a : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Tuple = {}
__a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.remove_space:
__a : Union[str, Any] = ' '.join(inputs.strip().split() )
else:
__a : List[Any] = inputs
__a : Optional[Any] = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__a : Any = unicodedata.normalize('NFKD' , __a )
__a : List[Any] = ''.join([c for c in outputs if not unicodedata.combining(__a )] )
if self.do_lower_case:
__a : List[Any] = outputs.lower()
return outputs
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[int] = self.preprocess_text(__a )
__a : List[Any] = self.sp_model.encode(__a , out_type=__a )
__a : int = []
for piece in pieces:
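            # SentencePiece can glue a trailing comma onto a digit run (e.g. "9,"),
            # so split those pieces: re-encode the digits and keep the comma separate.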
if len(__a ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__a : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__a , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__a : int = cur_pieces[1:]
else:
__a : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__a )
else:
new_pieces.append(__a )
return new_pieces
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return self.sp_model.PieceToId(__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return self.sp_model.IdToPiece(__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = ''.join(__a ).replace(__a , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : List[str] = [self.sep_token_id]
__a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self , __a , __a = None , __a = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is not None:
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1, 1]
return ([0] * len(__a )) + [1, 1]
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : Dict = [self.sep_token_id]
__a : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Optional[int] = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , 'wb' ) as fi:
__a : int = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
__a : int = super()._decode(*__a , **__a )
__a : List[Any] = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
return text
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Optional[int] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__a : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
__a : Tuple = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
__a : Optional[int] = transform(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE )
return image
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
if "visual_encoder" in key:
__a : List[Any] = re.sub('visual_encoder*' , 'vision_model.encoder' , _SCREAMING_SNAKE_CASE )
if "blocks" in key:
__a : List[Any] = re.sub(r'blocks' , 'layers' , _SCREAMING_SNAKE_CASE )
if "attn" in key:
__a : List[str] = re.sub(r'attn' , 'self_attn' , _SCREAMING_SNAKE_CASE )
if "norm1" in key:
__a : List[Any] = re.sub(r'norm1' , 'layer_norm1' , _SCREAMING_SNAKE_CASE )
if "norm2" in key:
__a : str = re.sub(r'norm2' , 'layer_norm2' , _SCREAMING_SNAKE_CASE )
if "encoder.norm" in key:
__a : List[str] = re.sub(r'encoder.norm' , 'post_layernorm' , _SCREAMING_SNAKE_CASE )
if "encoder.patch_embed.proj" in key:
__a : Dict = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _SCREAMING_SNAKE_CASE )
if "encoder.pos_embed" in key:
__a : Optional[int] = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , _SCREAMING_SNAKE_CASE )
if "encoder.cls_token" in key:
__a : Dict = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , _SCREAMING_SNAKE_CASE )
if "self_attn" in key:
__a : List[str] = re.sub(r'self_attn.proj' , 'self_attn.projection' , _SCREAMING_SNAKE_CASE )
return key
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any]=None ):
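    # Build the HF config (from file or defaults), then convert each of the three
    # BLIP heads in turn: captioning, VQA, and image-text retrieval.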
if config_path is not None:
__a : List[Any] = BlipConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
__a : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
__a : str = BlipForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval()
__a : int = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__a : List[Any] = blip_decoder(pretrained=_SCREAMING_SNAKE_CASE , image_size=384 , vit='base' )
__a : Optional[int] = pt_model.eval()
__a : List[Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
__a : Optional[Any] = modified_state_dict.pop(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = rename_key(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = value
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = 384
__a : List[Any] = load_demo_image(image_size=_SCREAMING_SNAKE_CASE , device='cpu' )
__a : int = BertTokenizer.from_pretrained('bert-base-uncased' )
__a : Dict = tokenizer(['a picture of'] ).input_ids
__a : str = hf_model.generate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
__a : List[str] = hf_model.generate(_SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__a : Optional[Any] = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__a : Tuple = blip_vqa(pretrained=_SCREAMING_SNAKE_CASE , image_size=_SCREAMING_SNAKE_CASE , vit='base' )
vqa_model.eval()
__a : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
__a : Optional[Any] = modified_state_dict.pop(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = rename_key(_SCREAMING_SNAKE_CASE )
__a : str = value
__a : Dict = BlipForQuestionAnswering(_SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = ['How many dogs are in this image?']
__a : int = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids
__a : Any = hf_vqa_model.generate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__a : Tuple = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__a : Dict = blip_itm(pretrained=_SCREAMING_SNAKE_CASE , image_size=_SCREAMING_SNAKE_CASE , vit='base' )
itm_model.eval()
__a : Dict = itm_model.state_dict()
for key in modified_state_dict.copy():
__a : Dict = modified_state_dict.pop(_SCREAMING_SNAKE_CASE )
__a : List[Any] = rename_key(_SCREAMING_SNAKE_CASE )
__a : str = value
__a : Dict = BlipForImageTextRetrieval(_SCREAMING_SNAKE_CASE )
__a : Tuple = ['A picture of a woman with a dog sitting in a beach']
__a : Dict = tokenizer(
_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
__a : Optional[int] = hf_itm_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_itm_head=_SCREAMING_SNAKE_CASE )
__a : int = hf_itm_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_itm_head=_SCREAMING_SNAKE_CASE )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__lowercase : str = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
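# Example invocation (hypothetical script name, for illustration only):
# python convert_blip_checkpoint.py --pytorch_dump_folder_path ./blip-hf --config_path ./config.json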
| 294
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowercase : str = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'MobileNetV1Config'
# Base docstring
__lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
__lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
__lowercase : int = 'google/mobilenet_v1_1.0_224'
__lowercase : Any = 'tabby, tabby cat'
__lowercase : Dict = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
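    # Build a flat map from TensorFlow variable prefixes (MobilenetV1/...) to the
    # corresponding PyTorch parameters so the weights can be copied one by one.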
__a : Dict = {}
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[Any] = model.mobilenet_va
else:
__a : List[Any] = model
__a : Dict = 'MobilenetV1/Conv2d_0/'
__a : Dict = backbone.conv_stem.convolution.weight
__a : Optional[Any] = backbone.conv_stem.normalization.bias
__a : int = backbone.conv_stem.normalization.weight
__a : int = backbone.conv_stem.normalization.running_mean
__a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a : int = i + 1
__a : Dict = i * 2
__a : Dict = backbone.layer[pt_index]
__a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a : Union[str, Any] = pointer.convolution.weight
__a : Optional[Any] = pointer.normalization.bias
__a : Union[str, Any] = pointer.normalization.weight
__a : List[Any] = pointer.normalization.running_mean
__a : Tuple = pointer.normalization.running_var
__a : List[str] = backbone.layer[pt_index + 1]
__a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a : Optional[int] = pointer.convolution.weight
__a : List[str] = pointer.normalization.bias
__a : Dict = pointer.normalization.weight
__a : Dict = pointer.normalization.running_mean
__a : Optional[int] = pointer.normalization.running_var
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__a : Optional[int] = model.classifier.weight
__a : List[Any] = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = array
# Build TF to PyTorch weights loading map
__a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__a : Union[str, Any] = array.squeeze().transpose()
else:
__a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ):
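    # Emulate TensorFlow's 'SAME' padding: total padding depends on the input size
    # modulo the stride, and any odd remainder goes to the bottom/right edge.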
__a , __a : Any = features.shape[-2:]
__a , __a : int = conv_layer.stride
__a , __a : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__a : int = max(kernel_height - stride_height , 0 )
else:
__a : int = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a : Any = max(kernel_width - stride_width , 0 )
else:
__a : str = max(kernel_width - (in_width % stride_width) , 0 )
__a : int = pad_along_width // 2
__a : Dict = pad_along_width - pad_left
__a : List[str] = pad_along_height // 2
__a : Union[str, Any] = pad_along_height - pad_top
__a : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 )
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a : Union[str, Any] = nn.Convad(
in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , )
if use_normalization:
__a : List[str] = nn.BatchNormad(
num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , )
else:
__a : Tuple = None
if use_activation:
if isinstance(__a , __a ):
__a : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __a ):
__a : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__a : Dict = config.hidden_act
else:
__a : List[Any] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config.tf_padding:
__a : Union[str, Any] = apply_tf_padding(__a , self.convolution )
__a : Union[str, Any] = self.convolution(__a )
if self.normalization is not None:
__a : str = self.normalization(__a )
if self.activation is not None:
__a : Optional[int] = self.activation(__a )
return features
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = MobileNetVaConfig
A_ = load_tf_weights_in_mobilenet_va
A_ = "mobilenet_v1"
A_ = "pixel_values"
A_ = False
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = True ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[int] = config
__a : str = 32
__a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a : Union[str, Any] = MobileNetVaConvLayer(
__a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , )
__a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a : Any = nn.ModuleList()
for i in range(13 ):
__a : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=1 , ) )
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__a : Union[str, Any] = self.conv_stem(__a )
__a : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a : List[str] = layer_module(__a )
if output_hidden_states:
__a : List[Any] = all_hidden_states + (hidden_states,)
__a : str = hidden_states
if self.pooler is not None:
__a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 )
else:
__a : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=__a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Tuple = config.num_labels
__a : Tuple = MobileNetVaModel(__a )
__a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a )
__a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : int = self.classifier(self.dropout(__a ) )
__a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = 'single_label_classification'
else:
__a : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__a : Optional[Any] = MSELoss()
if self.num_labels == 1:
__a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__a : List[str] = CrossEntropyLoss()
__a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Tuple = BCEWithLogitsLoss()
__a : Optional[int] = loss_fct(__a , __a )
if not return_dict:
__a : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowercase : List[str] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__lowercase : Optional[int] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowercase : Union[str, Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
__lowercase : Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def lowerCamelCase ():
__a : Optional[int] = cn.convert_to_negative(_SCREAMING_SNAKE_CASE )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCamelCase ():
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_SCREAMING_SNAKE_CASE , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCamelCase ():
__a : Dict = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCamelCase ():
__a : Any = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__a : Optional[Any] = canny.canny(_SCREAMING_SNAKE_CASE )
# assert canny array for at least one True
assert canny_array.any()
def lowerCamelCase ():
assert gg.gaussian_filter(_SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all()
def lowerCamelCase ():
# laplace diagonals
__a : List[Any] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
__a : int = conv.img_convolve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
assert res.any()
def lowerCamelCase ():
assert med.median_filter(_SCREAMING_SNAKE_CASE , 3 ).any()
def lowerCamelCase ():
__a , __a : Optional[int] = sob.sobel_filter(_SCREAMING_SNAKE_CASE )
assert grad.any() and theta.any()
def lowerCamelCase ():
__a : Union[str, Any] = sp.make_sepia(_SCREAMING_SNAKE_CASE , 20 )
assert sepia.all()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" ):
__a : str = bs.Burkes(imread(_SCREAMING_SNAKE_CASE , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" , ):
__a : str = rs.NearestNeighbour(imread(_SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCamelCase ():
__a : Optional[int] = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
__a : List[str] = imread(_SCREAMING_SNAKE_CASE , 0 )
# Test for get_neighbors_pixel function() return not None
__a : Any = 0
__a : List[Any] = 0
__a : List[str] = image[x_coordinate][y_coordinate]
__a : Union[str, Any] = lbp.get_neighbors_pixel(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__a : Any = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__a : Optional[int] = lbp.local_binary_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert lbp_image.any()
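# --- Added illustration (hedged sketch; the exact neighbour ordering used by the
# lbp module above may differ): a local binary pattern packs the thresholded
# 8-neighbourhood into one byte, neighbour i contributing bit i:
def _demo_lbp(center, neighbors):
    return sum((1 if n >= center else 0) << i for i, n in enumerate(neighbors))
assert _demo_lbp(5, [6, 4, 4, 4, 4, 4, 4, 6]) == 0b10000001 # bits 0 and 7 set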
'''simple docstring'''
import os
def lowerCamelCase ():
with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
__a : List[Any] = str(file.readlines()[0] )
__a : str = names.replace('"' , '' ).split(',' )
names.sort()
__a : Union[str, Any] = 0
__a : Tuple = 0
for i, name in enumerate(names ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
__a : Any = 0
return total_score
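# Worked example (from the Project Euler problem statement): COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53 and, sitting at alphabetical position 938, contributes
# 938 * 53 = 49714 to the total.
assert sum(ord(letter ) - 64 for letter in 'COLIN' ) == 53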
if __name__ == "__main__":
print(solution())
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
__a : Any = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
'''simple docstring'''
__lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__lowercase : List[str] = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = start
# add current to visited
visited.append(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(_SCREAMING_SNAKE_CASE )
# if all vertices haven't been visited select a new one to visit
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
__lowercase : Union[str, Any] = topological_sort('a', [], [])
print(sort)
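# Added note (illustration): the DFS post-order above emits children before parents,
# so the sample graph yields ['c', 'd', 'e', 'b', 'a']; reverse that list to get the
# conventional topological order a, b, e, d, c.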
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowercase : Any = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
__lowercase : Union[str, Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
__a : Optional[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE )
# emb -> embedding
if name.startswith('emb.' ):
__a : int = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__a : Dict = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__a : Optional[int] = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _SCREAMING_SNAKE_CASE )
# ffn -> feed_forward
__a : Tuple = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _SCREAMING_SNAKE_CASE )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__a : str = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__a : List[Any] = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
__a : Any = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__a : int = 'rwkv.' + name
__a : Optional[int] = weight
return state_dict
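# Added illustration (hedged, hypothetical key): the renaming above turns
# 'blocks.3.att.time_mix_k' into 'rwkv.blocks.3.attention.time_mix_key':
_demo_key = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , 'blocks.3.att.time_mix_k' )
assert 'rwkv.' + _demo_key.replace('.time_mix_k' , '.time_mix_key' ) == 'rwkv.blocks.3.attention.time_mix_key'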
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=False , _SCREAMING_SNAKE_CASE : List[Any]=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__a : Union[str, Any] = 50_277
__a : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__a : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=_SCREAMING_SNAKE_CASE )
__a : int = len(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
# 2. Build the config
__a : Any = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__a : int = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
__a : Optional[int] = RwkvConfig(
vocab_size=_SCREAMING_SNAKE_CASE , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
# 3. Download model file then convert state_dict
__a : List[str] = hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
__a : Any = convert_state_dict(_SCREAMING_SNAKE_CASE )
# 4. Split in shards and save
__a , __a : Any = shard_checkpoint(_SCREAMING_SNAKE_CASE )
for shard_file, shard in shards.items():
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if index is not None:
__a : int = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save the index as well
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
__a : Union[str, Any] = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
f.write(_SCREAMING_SNAKE_CASE )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
__a : Dict = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__a : int = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__a : Optional[int] = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE , max_shard_size='2GB' )
tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return number & 1 == 0
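# Added note: `&` binds tighter than `==` in Python, so the expression parses as
# `(number & 1) == 0`; the test works because the least-significant bit of every
# even number is 0.
assert (10 & 1 == 0) and not (7 & 1 == 0) # illustrative sanity check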
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase : str = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ['LayoutLMv2FeatureExtractor']
__lowercase : Union[str, Any] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Tuple = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'laion/clap-htsat-unfused'
__a : Optional[Any] = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_tokenizer()
__a : List[str] = self.get_feature_extractor()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
__a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : int = self.get_tokenizer()
__a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : int = floats_list((3, 1000) )
__a : str = feature_extractor(__a , return_tensors='np' )
__a : int = processor(audios=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_feature_extractor()
__a : Any = self.get_tokenizer()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Union[str, Any] = 'This is a test string'
__a : Union[str, Any] = processor(text=__a )
__a : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : str = self.get_tokenizer()
__a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Optional[int] = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_feature_extractor()
__a : Optional[int] = self.get_tokenizer()
__a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a : Dict = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = 42
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self , __a = 3 , __a = 3 , __a = ("DownEncoderBlock2D",) , __a = ("UpDecoderBlock2D",) , __a = (64,) , __a = 1 , __a = "silu" , __a = 3 , __a = 32 , __a = 256 , __a = 32 , __a = None , __a = 0.18215 , __a = "group" , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
__a : Any = Encoder(
in_channels=__a , out_channels=__a , down_block_types=__a , block_out_channels=__a , layers_per_block=__a , act_fn=__a , norm_num_groups=__a , double_z=__a , )
__a : Optional[int] = vq_embed_dim if vq_embed_dim is not None else latent_channels
__a : Dict = nn.Conv2d(__a , __a , 1 )
__a : Any = VectorQuantizer(__a , __a , beta=0.25 , remap=__a , sane_index_shape=__a )
__a : Dict = nn.Conv2d(__a , __a , 1 )
# pass init params to Decoder
__a : Optional[Any] = Decoder(
in_channels=__a , out_channels=__a , up_block_types=__a , block_out_channels=__a , layers_per_block=__a , act_fn=__a , norm_num_groups=__a , norm_type=__a , )
@apply_forward_hook
def __UpperCAmelCase ( self , __a , __a = True ):
'''simple docstring'''
__a : Optional[int] = self.encoder(__a )
__a : Optional[int] = self.quant_conv(__a )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__a )
@apply_forward_hook
def __UpperCAmelCase ( self , __a , __a = False , __a = True ):
'''simple docstring'''
if not force_not_quantize:
__a , __a , __a : Optional[Any] = self.quantize(__a )
else:
__a : Union[str, Any] = h
__a : List[str] = self.post_quant_conv(__a )
__a : Dict = self.decoder(__a , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
def __UpperCAmelCase ( self , __a , __a = True ):
'''simple docstring'''
__a : Dict = sample
__a : List[Any] = self.encode(__a ).latents
__a : List[str] = self.decode(__a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a )
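# --- Added usage sketch (illustration only; class and attribute names are
# assumptions, not from this file): a VQ-VAE round trip through the blocks above
# would look like:
# vqvae = VQModel()
# sample = torch.randn(1, 3, 64, 64)
# latents = vqvae.encode(sample).latents # pre-quantization latents
# reconstruction = vqvae.decode(latents).sample # quantize, then decode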
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ):
for key, value in kwargs.items():
__a : Optional[int] = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : Tuple = value
return destination
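# Added illustration of the recursive merge above (made-up dicts): merging
# {'a': {'y': 3}} into {'a': {'x': 1}, 'b': 2} yields {'a': {'x': 1, 'y': 3}, 'b': 2};
# nested dicts are merged key by key, while scalar values are overwritten.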
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ):
if port is None:
__a : List[str] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Optional[Any] = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
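# Examples (added for illustration): '192.168.0.23' is valid; '1.2.3' is invalid
# (exactly four octets are required); '192.168.999.1' is invalid (octet out of
# range). Note that this check caps each octet at 254, so 255 is rejected.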
if __name__ == "__main__":
__lowercase : Union[str, Any] = input().strip()
__lowercase : Optional[Any] = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
'''simple docstring'''
import numpy as np
def lowerCamelCase (_SCREAMING_SNAKE_CASE : np.array ):
return 1 / (1 + np.exp(-vector ))
def lowerCamelCase (_SCREAMING_SNAKE_CASE : np.array ):
return vector * sigmoid(1.7_0_2 * vector )
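# Added note (illustration): sigmoid(0) = 1 / (1 + e^0) = 0.5, so the function above
# returns 0 at 0; the 1.702 scaling makes x * sigmoid(1.702 * x) a close
# approximation of the GELU activation.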
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = None
A_ = None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ):
# Validation
def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
_SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) )
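# Added example (illustration): a root 2.0 with left child 1.0 and right child 3.0
# passes, since -inf < 1.0 < 2.0 < 3.0 < inf; swapping the two children fails
# because 3.0 then violates `left_bound < node.data < right_bound` on the left side.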
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) ):
__a : Tuple = tau * frequency / samplerate
__a : str = sin(_SCREAMING_SNAKE_CASE )
__a : str = cos(_SCREAMING_SNAKE_CASE )
__a : Tuple = _sin / (2 * q_factor)
__a : Any = (1 - _cos) / 2
__a : int = 1 - _cos
__a : Dict = 1 + alpha
__a : Any = -2 * _cos
__a : int = 1 - alpha
__a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) ):
__a : List[Any] = tau * frequency / samplerate
__a : str = sin(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = cos(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = _sin / (2 * q_factor)
__a : Tuple = (1 + _cos) / 2
__a : Tuple = -1 - _cos
__a : List[Any] = 1 + alpha
__a : Optional[Any] = -2 * _cos
__a : Dict = 1 - alpha
__a : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) ):
__a : Optional[Any] = tau * frequency / samplerate
__a : int = sin(_SCREAMING_SNAKE_CASE )
__a : str = cos(_SCREAMING_SNAKE_CASE )
__a : Any = _sin / (2 * q_factor)
__a : Tuple = _sin / 2
__a : str = 0
__a : str = -ba
__a : List[Any] = 1 + alpha
__a : str = -2 * _cos
__a : int = 1 - alpha
__a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) ):
__a : List[str] = tau * frequency / samplerate
__a : str = sin(_SCREAMING_SNAKE_CASE )
__a : Dict = cos(_SCREAMING_SNAKE_CASE )
__a : Tuple = _sin / (2 * q_factor)
__a : Any = 1 - alpha
__a : Tuple = -2 * _cos
__a : Union[str, Any] = 1 + alpha
__a : Optional[int] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) , ):
__a : str = tau * frequency / samplerate
__a : Dict = sin(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = cos(_SCREAMING_SNAKE_CASE )
__a : int = _sin / (2 * q_factor)
__a : Tuple = 10 ** (gain_db / 40)
__a : str = 1 + alpha * big_a
__a : List[str] = -2 * _cos
__a : Dict = 1 - alpha * big_a
__a : List[Any] = 1 + alpha / big_a
__a : Any = -2 * _cos
__a : List[str] = 1 - alpha / big_a
__a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) , ):
__a : List[str] = tau * frequency / samplerate
__a : int = sin(_SCREAMING_SNAKE_CASE )
__a : Tuple = cos(_SCREAMING_SNAKE_CASE )
__a : Any = _sin / (2 * q_factor)
__a : Any = 10 ** (gain_db / 40)
__a : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
__a : str = (big_a + 1) + (big_a - 1) * _cos
__a : List[str] = (big_a - 1) - (big_a + 1) * _cos
__a : List[str] = (big_a - 1) + (big_a + 1) * _cos
__a : int = 2 * sqrt(_SCREAMING_SNAKE_CASE ) * alpha
__a : str = big_a * (pmc + aaa)
__a : Any = 2 * big_a * mpc
__a : Union[str, Any] = big_a * (pmc - aaa)
__a : int = ppmc + aaa
__a : Optional[int] = -2 * pmpc
__a : List[Any] = ppmc - aaa
__a : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float = 1 / sqrt(2 ) , ):
__a : str = tau * frequency / samplerate
__a : List[str] = sin(_SCREAMING_SNAKE_CASE )
__a : List[Any] = cos(_SCREAMING_SNAKE_CASE )
__a : str = _sin / (2 * q_factor)
__a : int = 10 ** (gain_db / 40)
__a : Optional[int] = (big_a + 1) - (big_a - 1) * _cos
__a : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
__a : Dict = (big_a - 1) - (big_a + 1) * _cos
__a : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
__a : Any = 2 * sqrt(_SCREAMING_SNAKE_CASE ) * alpha
__a : int = big_a * (ppmc + aaa)
__a : List[str] = -2 * big_a * pmpc
__a : Tuple = big_a * (ppmc - aaa)
__a : Tuple = pmc + aaa
__a : int = 2 * mpc
__a : Any = pmc - aaa
__a : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
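# --- Added usage sketch (illustration only; names are assumptions: the constructor
# names above are obfuscated in this dump, and `IIRFilter.process` is assumed from
# audio_filters.iir_filter). These are the biquad designs from the RBJ Audio EQ
# Cookbook; a 1 kHz low-pass at a 48 kHz sample rate would be used as:
# filt = make_lowpass(1000, 48000)
# filtered = [filt.process(sample) for sample in samples]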
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Tuple = SwinConfig(image_size=192 )
if "base" in model_name:
__a : Optional[int] = 6
__a : Optional[Any] = 128
__a : Union[str, Any] = (2, 2, 18, 2)
__a : Tuple = (4, 8, 16, 32)
elif "large" in model_name:
__a : List[Any] = 12
__a : str = 192
__a : List[str] = (2, 2, 18, 2)
__a : Optional[int] = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
__a : Tuple = window_size
__a : Optional[int] = embed_dim
__a : str = depths
__a : Dict = num_heads
return config
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
if "encoder.mask_token" in name:
__a : List[Any] = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
__a : int = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
__a : List[str] = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
__a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__a : Union[str, Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a : Any = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a : Dict = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a : Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a : List[str] = 'layernorm.weight'
if name == "encoder.norm.bias":
__a : Optional[Any] = 'layernorm.bias'
if "decoder" in name:
pass
else:
__a : Optional[Any] = 'swin.' + name
return name
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] ):
for key in orig_state_dict.copy().keys():
__a : int = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
__a : List[str] = key.split('.' )
__a : str = int(key_split[2] )
__a : List[str] = int(key_split[4] )
__a : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a : List[Any] = val[:dim, :]
__a : Optional[int] = val[
dim : dim * 2, :
]
__a : Dict = val[-dim:, :]
else:
__a : int = val[
:dim
]
__a : Optional[Any] = val[
dim : dim * 2
]
__a : List[Any] = val[
-dim:
]
else:
__a : Any = val
return orig_state_dict
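# Added note: the original checkpoint stores query/key/value as one fused qkv matrix
# of shape (3 * dim, dim); the slicing above hands rows [:dim] to the query,
# [dim : dim * 2] to the key and [-dim:] to the value projection (same split for the
# 1-D biases).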
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ):
__a : int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
__a : Any = get_swin_config(_SCREAMING_SNAKE_CASE )
__a : List[str] = SwinForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.eval()
__a : List[Any] = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
__a : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__a : Any = ViTImageProcessor(size={'height': 192, 'width': 192} )
__a : Optional[int] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__a : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
with torch.no_grad():
__a : Optional[Any] = model(**_SCREAMING_SNAKE_CASE ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
__lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowercase : Tuple = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : List[str] = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowercase : Any = [0, 25, 50]
__lowercase : int = [25, 50, 75]
__lowercase : List[str] = fuzz.membership.trimf(X, abca)
__lowercase : Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowercase : List[Any] = np.ones(75)
__lowercase : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
__lowercase : str = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__lowercase : Optional[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowercase : str = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
__lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
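# Hedged sanity check (added; assumes fuzzy_or/fuzzy_and return (universe, mf)
# pairs, as the [1] indexing above already implies): union and intersection
# are the pointwise max and min of the two membership functions.
assert np.allclose(union, np.maximum(young, middle_aged))
assert np.allclose(intersection, np.minimum(young, middle_aged))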
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 294
| 1
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowercase : str = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 13_10_72,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
return torch.atana(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / math.pi * 2
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : int = torch.sin(t * math.pi / 2 ) ** 2
__a : Optional[int] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
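# Worked endpoints (added; assumes the k-diffusion convention
# t = atan2(sigma, alpha) / pi * 2): (alpha, sigma) = (1, 0) gives t = 0 (no
# noise) and (0, 1) gives t = 1 (pure noise), so get_crash_schedule maps
# t in [0, 1] onto itself monotonically.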
class __UpperCamelCase ( lowerCAmelCase_ ):
pass
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__()
__a : List[str] = DiffusionAttnUnetaD(__a , n_attn_layers=4 )
__a : Optional[Any] = deepcopy(self.diffusion )
__a : Any = torch.quasirandom.SobolEngine(1 , scramble=__a )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
__a : Tuple = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
__lowercase : List[str] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__lowercase : Optional[int] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__lowercase : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__lowercase : Tuple = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__lowercase : Union[str, Any] = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__lowercase : Any = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
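# Traced example (added): convert_resconv_naming('main.0.weight') replaces the
# 'main.0' prefix via RES_CONV_MAP, yielding 'conv_1.weight'; 'skip.weight'
# becomes 'conv_skip.weight'.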
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
for key, value in ATTN_MAP.items():
if name.startswith(_SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return name.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif name.startswith(_SCREAMING_SNAKE_CASE ):
return [name.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=13 ):
__a : Optional[int] = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
__a : Dict = 0
if string.startswith('net.3.' ):
depth += 1
__a : str = string[6:]
elif string.startswith('net.' ):
__a : str = string[4:]
while string.startswith('main.7.' ):
depth += 1
__a : Optional[int] = string[7:]
if string.startswith('main.' ):
__a : Optional[Any] = string[5:]
# mid block
if string[:2].isdigit():
__a : Tuple = string[:2]
__a : Optional[int] = string[2:]
else:
__a : Optional[int] = string[0]
__a : Optional[Any] = string[1:]
if depth == max_depth:
__a : List[Any] = MID_NUM_TO_LAYER[layer_num]
__a : List[Any] = 'mid_block'
elif depth > 0 and int(_SCREAMING_SNAKE_CASE ) < 7:
__a : int = DOWN_NUM_TO_LAYER[layer_num]
__a : Dict = F"""down_blocks.{depth}"""
elif depth > 0 and int(_SCREAMING_SNAKE_CASE ) > 7:
__a : Tuple = UP_NUM_TO_LAYER[layer_num]
__a : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
__a : List[str] = DEPTH_0_TO_LAYER[layer_num]
__a : List[Any] = F"""up_blocks.{max_depth - 1}""" if int(_SCREAMING_SNAKE_CASE ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
__a : Any = string_left[1:]
if "resnets" in new_layer:
__a : Dict = convert_resconv_naming(_SCREAMING_SNAKE_CASE )
elif "attentions" in new_layer:
__a : str = convert_attn_naming(_SCREAMING_SNAKE_CASE )
__a : Tuple = new_string_left
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : List[str] = prefix + '.' + new_layer + '.' + string_left
else:
__a : List[str] = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
__a : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
__a : Tuple = rename(_SCREAMING_SNAKE_CASE )
# check if we need to transform from Conv => Linear for attention
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = transform_conv_attns(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : List[Any] = v
return new_state_dict
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ):
if len(_SCREAMING_SNAKE_CASE ) == 1:
if len(v.shape ) == 3:
# weight
__a : List[Any] = v[:, :, 0]
else:
# bias
__a : Optional[int] = v
else:
# qkv matrices
__a : Optional[int] = v.shape[0]
__a : List[str] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__a : Dict = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__a : Optional[Any] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
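# --- Hedged example (added; not part of the original script) ---
# A 1x1 Conv1d weight of shape (out, in, 1) carries the same parameters as a
# Linear weight of shape (out, in); dropping the trailing kernel axis, as
# transform_conv_attns does above, performs the conversion.
def _demo_conv_to_linear():
    import torch

    conv_w = torch.randn(8, 8, 1)
    linear_w = conv_w[:, :, 0]
    assert linear_w.shape == (8, 8)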
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__a : Union[str, Any] = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
__a : List[Any] = download(_SCREAMING_SNAKE_CASE )
__a : Any = MODELS_MAP[model_name]['sample_rate']
__a : Any = MODELS_MAP[model_name]['sample_size']
__a : Tuple = Object()
__a : List[Any] = sample_size
__a : List[Any] = sample_rate
__a : Optional[int] = 0
__a : Dict = UNetaDModel(sample_size=_SCREAMING_SNAKE_CASE , sample_rate=_SCREAMING_SNAKE_CASE )
__a : Any = diffusers_model.state_dict()
__a : Union[str, Any] = DiffusionUncond(_SCREAMING_SNAKE_CASE )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_SCREAMING_SNAKE_CASE )['state_dict'] )
__a : int = orig_model.diffusion_ema.eval()
__a : Any = orig_model.state_dict()
__a : Optional[int] = rename_orig_weights(_SCREAMING_SNAKE_CASE )
__a : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__a : List[Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_SCREAMING_SNAKE_CASE ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(_SCREAMING_SNAKE_CASE ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
__a : Dict = value.squeeze()
__a : int = value
diffusers_model.load_state_dict(_SCREAMING_SNAKE_CASE )
__a : str = 100
__a : Optional[int] = 33
__a : str = IPNDMScheduler(num_train_timesteps=_SCREAMING_SNAKE_CASE )
__a : str = torch.manual_seed(_SCREAMING_SNAKE_CASE )
__a : List[Any] = torch.randn([1, 2, config.sample_size] , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
__a : List[str] = torch.linspace(1 , 0 , steps + 1 , device=_SCREAMING_SNAKE_CASE )[:-1]
__a : Optional[Any] = get_crash_schedule(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = DanceDiffusionPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = torch.manual_seed(33 )
__a : Any = pipe(num_inference_steps=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).audios
__a : Optional[int] = sampling.iplms_sample(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {} )
__a : List[Any] = generated.clamp(-1 , 1 )
__a : List[Any] = (generated - audio).abs().sum()
__a : Union[str, Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , _SCREAMING_SNAKE_CASE )
print('Diff max' , _SCREAMING_SNAKE_CASE )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__lowercase : List[Any] = parser.parse_args()
main(args)
| 294
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
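def _brute_force_check(n: str = N) -> int:
    # Added cross-check (illustrative, not part of the original solution):
    # evaluate every 13-digit window directly and take the maximum product.
    # Slower than the sliding heuristic above, but unambiguous.
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))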
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
| 1
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
__a : Any = test_results.split(' ' )
__a : Optional[int] = 0
__a : Optional[int] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__a : Tuple = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_SCREAMING_SNAKE_CASE ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
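# Traced example (added): handle_test_results('1 failed, 2 passed in 91.4s')
# returns (1, 2, '91.4s') -- counts precede the keywords, and the time spent is
# the last token (or second-to-last when the line is wrapped in '=' signs).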
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : Any = {}
__a : Union[str, Any] = None
__a : Dict = False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]' , _SCREAMING_SNAKE_CASE ):
__a : Any = True
__a : int = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
__a : Optional[int] = line
__a : Any = False
return failures
class __UpperCamelCase :
def __init__( self , __a , __a ):
'''simple docstring'''
__a : Optional[int] = title
__a : int = doc_test_results['time_spent'].split(',' )[0]
__a : Any = doc_test_results['success']
__a : List[Any] = doc_test_results['failures']
__a : Optional[int] = self.n_success + self.n_failures
# Failures and success of the modeling tests
__a : Optional[Any] = doc_test_results
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = [self._time_spent]
__a : Any = 0
for time in time_spent:
__a : int = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
__a : List[str] = [0, 0, time_parts[0]]
__a , __a , __a : List[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__a , __a , __a : List[Any] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"""{int(__a )}h{int(__a )}m{int(__a )}s"""
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 40
__a : Dict = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(__a , __a )}
__a : Optional[int] = ''
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def __UpperCAmelCase ( ):
'''simple docstring'''
__a : Union[str, Any] = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=__a , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
__a : Optional[int] = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
__a : Dict = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=__a , )
def __UpperCAmelCase ( self , __a , __a , __a , __a ):
'''simple docstring'''
__a : Union[str, Any] = ''
for key, value in failures.items():
__a : Dict = value[:200] + ' [Truncated]' if len(__a ) > 250 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
__a : List[str] = job_name
__a : Dict = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
__a : int = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
__a : Optional[int] = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
__a : Optional[int] = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
__a : List[str] = f"""*Num failures* :{len(job_result["failed"] )} \n"""
__a : Optional[Any] = job_result['failures']
__a : List[Any] = self.get_reply_blocks(__a , __a , __a , text=__a )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f"""Results for {job}""" , blocks=__a , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def lowerCamelCase ():
__a : Tuple = os.environ['GITHUB_RUN_ID']
__a : int = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
__a : Tuple = requests.get(_SCREAMING_SNAKE_CASE ).json()
__a : Optional[Any] = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
__a : Optional[int] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
__a : str = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _SCREAMING_SNAKE_CASE )
return {}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Optional[Any] = {}
if os.path.exists(_SCREAMING_SNAKE_CASE ):
__a : str = os.listdir(_SCREAMING_SNAKE_CASE )
for file in files:
try:
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , encoding='utf-8' ) as f:
__a : Any = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}.""" ) from e
return _artifact
def lowerCamelCase ():
class __UpperCamelCase :
def __init__( self , __a ):
'''simple docstring'''
__a : str = name
__a : Dict = []
def __str__( self ):
'''simple docstring'''
return self.name
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.paths.append({'name': self.name, 'path': path} )
__a : Dict[str, Artifact] = {}
__a : Tuple = filter(os.path.isdir , os.listdir() )
for directory in directories:
__a : List[str] = directory
if artifact_name not in _available_artifacts:
__a : Union[str, Any] = Artifact(_SCREAMING_SNAKE_CASE )
_available_artifacts[artifact_name].add_path(_SCREAMING_SNAKE_CASE )
return _available_artifacts
if __name__ == "__main__":
__lowercase : Union[str, Any] = get_job_links()
__lowercase : Optional[Any] = retrieve_available_artifacts()
__lowercase : int = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : List[str] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : List[str] = github_actions_job_links.get('run_doctests')
__lowercase : Any = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
__lowercase : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Optional[int] = handle_test_results(artifact['stats'])
__lowercase : Union[str, Any] = failed
__lowercase : Dict = success
__lowercase : Optional[Any] = time_spent[1:-1] + ', '
__lowercase : Tuple = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
__lowercase : Tuple = line.replace('FAILED ', '')
__lowercase : str = line.split()[0].replace('\n', '')
if "::" in line:
__lowercase , __lowercase : str = line.split('::')
else:
__lowercase , __lowercase : List[str] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : str = all_failures[test] if test in all_failures else 'N/A'
__lowercase : Any = failure
break
__lowercase : Dict = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 294
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
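        # Note (added): '\u0120' below is 'Ġ', the byte-level BPE marker that
        # GPT-2-style vocabularies prepend to tokens preceded by a space.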
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
    # round to the nearest integer to avoid float cube-root error
    # (e.g. 27 ** (1 / 3) == 3.0000000000000004)
    __a : Optional[int] = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not number >= 1:
raise ValueError(
            'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__a : Dict = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
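# Traced example (added): with number=1 and iterations=7 the loop builds
# '1 2 Fizz 4 Buzz Fizz 7 ' -- each entry is followed by a single space.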
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
while b:
__a , __a : Tuple = b, a % b
return a
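# Worked trace (added): euclidean_gcd(48, 18) steps through (a, b) =
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6 once b reaches 0.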
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
return a if b == 0 else euclidean_gcd_recursive(_SCREAMING_SNAKE_CASE , a % b )
def lowerCamelCase ():
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 294
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Tuple = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class __UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
A_ = "switch_transformers"
A_ = ["past_key_values"]
A_ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , __a=3_2128 , __a=768 , __a=64 , __a=2048 , __a=64 , __a=12 , __a=3 , __a=12 , __a=3 , __a=12 , __a=8 , __a=False , __a=0.01 , __a="float32" , __a=False , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=0.001 , __a=0.001 , __a=1.0 , __a="relu" , __a=True , __a=False , __a=True , __a=0 , __a=1 , **__a , ):
'''simple docstring'''
__a : Tuple = vocab_size
__a : List[str] = d_model
__a : List[Any] = d_kv
__a : str = d_ff
__a : str = num_sparse_encoder_layers
__a : Any = num_layers
__a : List[str] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Any = num_sparse_decoder_layers
        # This tells us after how many encoder layers we have to place a sparse layer.
if self.num_sparse_encoder_layers > 0:
__a : List[str] = self.num_layers // self.num_sparse_encoder_layers
else:
__a : List[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we have to place a sparse layer.
if self.num_sparse_decoder_layers > 0:
__a : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__a : Optional[Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers
__a : Tuple = num_heads
__a : int = num_experts
__a : Tuple = expert_capacity
__a : List[str] = router_bias
__a : Tuple = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" )
__a : Optional[Any] = router_dtype
__a : Dict = router_ignore_padding_tokens
__a : Dict = relative_attention_num_buckets
__a : Union[str, Any] = relative_attention_max_distance
__a : Dict = dropout_rate
__a : int = layer_norm_epsilon
__a : str = initializer_factor
__a : Optional[int] = feed_forward_proj
__a : List[str] = use_cache
__a : List[str] = add_router_probs
__a : Optional[int] = router_z_loss_coef
__a : Optional[int] = router_aux_loss_coef
__a : List[Any] = self.feed_forward_proj.split('-' )
__a : str = act_info[-1]
__a : Optional[Any] = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__a : int = 'gelu_new'
super().__init__(
pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )
| 350
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
| 294
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( A__ , unittest.TestCase ):
A_ = GPTSanJapaneseTokenizer
A_ = False
A_ = {"do_clean_text": False, "add_prefix_space": False}
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
__a : Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__a : List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__a : int = {"""unk_token""": """<unk>"""}
__a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__a : Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = self.get_input_output_texts(__A )
__a : List[str] = tokenizer.encode(__A , add_special_tokens=__A )
__a : str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
__a : Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
__a : Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__a : Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
__a : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__a : List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
__a : Any = tokens + [tokenizer.unk_token]
__a : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.get_tokenizer()
# Testing tokenization
__a : Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__a : str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__a : str = tokenizer.encode(__A )
__a : Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
    def test_prefix_input( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text )
        ids_2 = tokenizer.encode('' , prefix_text=prefix_text + input_text )
        ids_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        decoded_1 = tokenizer.decode(ids_1 )
        decoded_2 = tokenizer.decode(ids_2 )
        decoded_3 = tokenizer.decode(ids_3 )
        self.assertEqual(decoded_1 , expected_text )
        self.assertEqual(decoded_2 , expected_text )
        self.assertEqual(decoded_3 , expected_text )
@slow
    def test_token_type_ids( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_2 = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_1 , expected_mask_1 )
        self.assertListEqual(type_ids_2 , expected_mask_2 )
        self.assertListEqual(type_ids_3 , expected_mask_3 )
@slow
    def test_prefix_tokens( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        x_token_1 = tokenizer.encode('あンいワ' )
        x_token_2 = tokenizer.encode('' , prefix_text='あンいワ' )
        x_token_3 = tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_3[-1] )  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] )  # SEG token
@slow
    def test_batch_encode( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_2.attention_mask , expected_attmask )
    def test_conversion_reversible( self ):
        '''simple docstring'''
        pass
    def test_padding_different_model_input_name( self ):
        '''simple docstring'''
        pass
| 351
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
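# Helper used by the tests below: returns one representative linear layer from the
# first transformer block (the attribute name differs between GPT-2 and BLOOM-style models).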
def get_some_linear_layer (model ):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
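# Minimal LoRA-style adapter used by the training test: wraps a (frozen) linear
# module and adds a trainable low-rank bottleneck on top of its output.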
class LoRALayer( nn.Module ):
    def __init__( self , module , rank ):
        '''simple docstring'''
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self , input , *args , **kwargs ):
        '''simple docstring'''
        return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    MAX_NEW_TOKENS = 10
    def setUp( self ):
        '''simple docstring'''
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
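# Core 4-bit loading tests: config serialization, memory footprint, packed weight
# dtypes, generation quality, and guards against unsupported dtype/device casts.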
class Bnb4BitTest( Base4bitTest ):
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
    def tearDown( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization( self ):
        '''simple docstring'''
        config = self.model_abit.config
        self.assertTrue(hasattr(config , 'quantization_config' ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint( self ):
        '''simple docstring'''
        from bitsandbytes.nn import Paramsabit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def test_linear_are_abit( self ):
        '''simple docstring'''
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def test_generate_quality( self ):
        '''simple docstring'''
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_generate_quality_config( self ):
        '''simple docstring'''
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_raise_on_save_pretrained( self ):
        '''simple docstring'''
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )
    def test_raise_if_config_and_load_in_abit( self ):
        '''simple docstring'''
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=quantization_config , load_in_abit=True , device_map='auto' , bnb_abit_quant_type='nf4' , )
    def test_device_and_dtype_assignment( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(ValueError ):
            # Tries with a cast
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            # Tries with a cast
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        self.model_fpaa = self.model_fpaa.to(torch.floataa )
        _ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()
    def test_fpaa_abit_conversion( self ):
        '''simple docstring'''
        model = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=True , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = 'Translate in German: Hello, my dog is cute'
    def tearDown( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
    def test_inference_without_keep_in_fpaa( self ):
        '''simple docstring'''
        from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
    def test_inference_with_keep_in_fpaa( self ):
        '''simple docstring'''
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
class Classes4BitModelTest( Base4bitTest ):
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map='auto' )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map='auto' )
    def tearDown( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
    def test_correct_head_class( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest( Base4bitTest ):
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
    def tearDown( self ):
        '''simple docstring'''
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def test_pipeline( self ):
        '''simple docstring'''
        self.pipe = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu( Base4bitTest ):
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
    def test_multi_gpu_loading( self ):
        '''simple docstring'''
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining( Base4bitTest ):
    def setUp( self ):
        '''simple docstring'''
        self.model_name = 'facebook/opt-350m'
        super().setUp()
    def test_training( self ):
        '''simple docstring'''
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test( Bnb4BitTest ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 294
| 0
|
'''simple docstring'''
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence most TensorFlow logging
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 352
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
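# Output container for the projection model below; `projection_state` holds the
# text embedding that downstream diffusion pipelines consume.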
@dataclass
class TransformationModelOutput( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
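# The model projects either the final hidden state or, when the config sets
# `has_pre_transformation`, the penultimate layer after a LayerNorm.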
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.base_model = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , 'has_pre_transformation' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
        hint_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint_image ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 353
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowercase : Any = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export (
    model ,
    model_args: tuple ,
    output_path: Path ,
    ordered_input_names ,
    output_names ,
    dynamic_axes ,
    opset ,
    use_external_data_format=False ,
):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models (model_path: str , output_path: str , opset: int , fp16: bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
        'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
    } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__lowercase : Optional[int] = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
| 354
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
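# Builds a dict mapping TensorFlow checkpoint variable names to the corresponding
# PyTorch parameters, so TF weights can be copied over one by one.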
def _build_tf_to_pytorch_map (model , config , tf_weights=None ):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va (model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"""Loading TF weight {name} with shape {shape}""" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"""Importing {name}""" )
        if name not in tf_weights:
            logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
        logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + '/RMSProp' , None )
        tf_weights.pop(name + '/RMSProp_1' , None )
        tf_weights.pop(name + '/ExponentialMovingAverage' , None )
    logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
    return model
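# Replicates TensorFlow's "SAME" padding: computes the asymmetric per-side padding
# that the original TF MobileNetV1 applies before each strided convolution.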
def apply_tf_padding (features , conv_layer ):
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
class MobileNetVaConvLayer( nn.Module ):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )
        if use_normalization:
            self.normalization = nn.BatchNormad(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features ):
        '''simple docstring'''
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights( self , module ):
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
    def __init__( self , config , add_pooling_layer = True ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune ):
        '''simple docstring'''
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
| 294
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
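# Configuration class for WavLM; mirrors the Wav2Vec2 configuration with extra
# relative-position bucket parameters (num_buckets, max_bucket_distance).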
class WavLMConfig( PretrainedConfig ):
    model_type = 'wavlm'
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
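# Note: `inputs_to_logits_ratio` is the product of the feature-extractor strides,
# e.g. 5*2*2*2*2*2*2 = 320 input samples per output frame with the defaults above.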
| 355
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
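# The User-Agent above spoofs a desktop browser so Google returns the full
# script-embedded image data rather than a stripped-down page.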
def download_images_from_google_query (query: str = "dhaka" , max_images: int = 5 ):
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search' , params=params , headers=headers )
    soup = BeautifulSoup(html.text , 'html.parser' )
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image , 'ascii' ).decode(
            'unicode-escape' )
        original_size_img = bytes(original_size_img_not_fixed , 'ascii' ).decode(
            'unicode-escape' )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener )
        path_name = F"""query_{query.replace(" " , "_" )}"""
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , F"""{path_name}/original_size_img_{index}.jpg""" )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_image_processor()
__a : List[Any] = self.get_tokenizer()
__a : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__a : int = 'lower newer'
__a : Any = self.prepare_image_inputs()
__a : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
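    # A minimal usage sketch for the processor under test (hedged: the
    # checkpoint name is illustrative, not one used by this test class):
    #   processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
    #   batch = processor(text='una foto di un gatto', images=image, return_tensors='np')
    #   # batch holds input_ids / token_type_ids / attention_mask / pixel_values,
    #   # matching the keys asserted above.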
| 356
|
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
    print(solution())
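# Worked example for the scoring above: "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name after sorting it would
# contribute 938 * 53 = 49714 to the total.
if __name__ == "__main__":
    assert sum(ord(letter) - 64 for letter in 'COLIN') == 53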
| 294
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
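# Sketch of what the lazy module above enables (hedged: assumes the torch and
# vision backends are installed): the heavy submodules are only imported when
# an attribute is first accessed, so the top-level import stays cheap.
#   from transformers import ConvNextConfig, ConvNextModel
#   model = ConvNextModel(ConvNextConfig())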
| 357
|
'''simple docstring'''
edges: dict[str, list[str]] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices: list[str] = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start: str , visited: list[str] , sort: list[str] ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
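# Sanity check for the graph defined above: the DFS post-order emits children
# before parents, so 'a' (the traversal root) comes last.
if __name__ == "__main__":
    assert topological_sort('a', [], []) == ['c', 'd', 'e', 'b', 'a']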
| 294
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowercase : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( SequenceFeatureExtractor ):
    model_input_names = ['input_values', 'padding_mask']
    def __init__( self , feature_size = 1 , sampling_rate = 2_4000 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask' )
        input_values = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
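    # Usage sketch for the feature extractor above (hedged: the 24 kHz mono
    # input and chunking values are illustrative, not tied to a released
    # checkpoint):
    #   fe = __UpperCamelCase(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
    #   audio = np.zeros(48_000, dtype=np.float32)  # two seconds of silence
    #   batch = fe(audio, sampling_rate=24_000, return_tensors='np')
    #   # batch['input_values'] is (batch, channels, padded_length); with
    #   # padding enabled, batch['padding_mask'] marks the real samples.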
| 358
|
'''simple docstring'''
def lowerCamelCase (number : int ):
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 0
|
'''simple docstring'''
from math import factorial
def solution(num : int = 100 ):
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
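# Worked example: factorial(10) == 3_628_800 and 3+6+2+8+8+0+0 == 27.
if __name__ == "__main__":
    assert solution(10) == 27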
| 359
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_distilbert': [
        'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'DistilBertConfig',
        'DistilBertOnnxConfig',
    ],
    'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
        'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DistilBertForMaskedLM',
        'DistilBertForMultipleChoice',
        'DistilBertForQuestionAnswering',
        'DistilBertForSequenceClassification',
        'DistilBertForTokenClassification',
        'DistilBertModel',
        'DistilBertPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
        'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDistilBertForMaskedLM',
        'TFDistilBertForMultipleChoice',
        'TFDistilBertForQuestionAnswering',
        'TFDistilBertForSequenceClassification',
        'TFDistilBertForTokenClassification',
        'TFDistilBertMainLayer',
        'TFDistilBertModel',
        'TFDistilBertPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
        'FlaxDistilBertForMaskedLM',
        'FlaxDistilBertForMultipleChoice',
        'FlaxDistilBertForQuestionAnswering',
        'FlaxDistilBertForSequenceClassification',
        'FlaxDistilBertForTokenClassification',
        'FlaxDistilBertModel',
        'FlaxDistilBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 20_48,
}
class __UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model )
        madeup_words = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
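        # Worked example of the alignment above (illustrative ids only): the
        # spm piece "," has id 3, so its fairseq id is 3 + self.fairseq_offset
        # == 4, matching the fairseq column of the table.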
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 360
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        '''simple docstring'''
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_feature_extractor( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
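    # Minimal end-to-end sketch (hedged: running it downloads the checkpoint
    # used in setUp above):
    #   processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
    #   inputs = processor(text='a cat meowing', audios=floats_list((1, 1000))[0], return_tensors='np')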
| 294
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __UpperCamelCase :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        '''simple docstring'''
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        '''simple docstring'''
        return self.length
    def __getitem__( self , i ):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}
class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , first_batch=False ):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        '''simple docstring'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class __UpperCamelCase ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , first_batch=False ):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        '''simple docstring'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator , batch_size : int = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv' , data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None , padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['sentence1', 'sentence2', 'label'] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
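# How the helper above is typically consumed (hedged sketch; Accelerator is
# the standard entry point but is not imported in this file):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)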
| 361
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output( self , result ):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaVaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaVaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaVaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_deberta_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaVaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_deberta_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_deberta_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_deberta_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_deberta_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    def test_deberta_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
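        # The assertion above follows the usual integration-test pattern: run
        # the model under torch.no_grad(), slice a small window of the output,
        # and compare against pre-computed values with a loose tolerance
        # (atol=1e-4) so minor kernel differences across hardware do not fail
        # the test.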
| 294
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] ,
    constant_matrix: NDArray[float64] ,
    init_val: list[float] ,
    iterations: int ,
):
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ):
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
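# Worked example (hedged: a small strictly diagonally dominant system chosen
# for illustration):
#   4x +  y +  z = 2,  x + 5y + 2z = -6,  x + 2y + 4z = -4
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0, 0.0], 25))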
| 362
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper: bool = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ):
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
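# Usage demo for the environment patcher above (hedged: assumes MASTER_PORT
# is not already set in the calling environment):
if __name__ == "__main__":
    with patch_environment(master_port='29501'):
        assert os.environ['MASTER_PORT'] == '29501'
    assert 'MASTER_PORT' not in os.environ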
| 294
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
@staticmethod
def __UpperCAmelCase ( *__a , **__a ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
__a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__a : Optional[Any] = image_classifier(__a , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__a ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
__a : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
] , )
@require_tf
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
__a : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__a : Tuple = image_classifier(__a , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(__a ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
__a : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__a ) , [
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
[
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
{'score': 0.333, 'label': ANY(__a )},
],
] , )
@slow
@require_torch
    def test_large_model_pt( self ):
        '''simple docstring'''
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )
@slow
@require_tf
    def test_large_model_tf( self ):
        '''simple docstring'''
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )
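    # Outside the test harness, the same pipeline is used as (hedged: this
    # downloads the CLIP checkpoint on first call):
    #   classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
    #   classifier(image, candidate_labels=['cat', 'plane', 'remote'])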
| 363
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
| 0
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : Dict = logging.getLogger(__name__)
class __UpperCamelCase ( TokenClassificationTask ):
    def __init__( self , label_idx=-1 ):
        '''simple docstring'''
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer , test_input_reader , preds_list ):
        '''simple docstring'''
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
    def get_labels( self , path ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( TokenClassificationTask ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
    def get_labels( self , path ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( TokenClassificationTask ):
    def read_examples_from_file( self , data_dir , mode ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer , test_input_reader , preds_list ):
        '''simple docstring'''
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self , path ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
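# Example of the CoNLL-style input consumed above (hedged, illustrative):
# a token line like "EU NNP B-NP B-ORG" yields words=["EU"] and, with
# label_idx == -1, labels=["B-ORG"]; the chunk subclass (label_idx == -2)
# would instead pick "B-NP".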
| 364
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def lowerCamelCase (root: TreeNode | None ):
    # Validation
    def is_valid_tree(node: TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(root ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None , left_bound: float , right_bound: float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(root , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
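# Runnable demo for the validator above:
if __name__ == "__main__":
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    assert lowerCamelCase(valid) is True
    invalid = TreeNode(2.0, TreeNode(5.0))  # left child breaks the upper bound
    assert lowerCamelCase(invalid) is False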
| 294
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : List[str] = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 294
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __UpperCamelCase :
def __init__( self , __a=2 , __a=3 , __a=64 , __a=None ):
'''simple docstring'''
__a : str = np.random.default_rng(__UpperCAmelCase )
__a : List[str] = length
__a : List[Any] = rng.normal(size=(length,) ).astype(np.floataa )
__a : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self , __a ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets["train"].unique('label')

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
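# A minimal, hypothetical smoke test tying the helpers above together; the
# Accelerator usage is inferred from the `accelerator.distributed_type` check
# and the class/function names are the ones restored above:
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   model = RegressionModel(a=1, b=2)
#   model, train_dl = accelerator.prepare(model, train_dl)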
| 366
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char; re.sub returns a new string, so assign the result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 294
| 0
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 367
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
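# Worked example at a single point (hypothetical membership values, not read
# off the arrays above): if µ_young(x) = 0.6 and µ_middle_aged(x) = 0.3, then
# union = max(0.6, 0.3) = 0.6, intersection = min(0.6, 0.3) = 0.3,
# alg_sum = 0.6 + 0.3 - 0.18 = 0.72, alg_product = 0.18,
# bounded_sum = min(1, 0.9) = 0.9 and bounded_difference = max(0, 0.3) = 0.3.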
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 294
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__a : Any = F"""{src_lang}-{tgt_lang}"""
__a : List[Any] = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"""Generating {path}""")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 368
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    # Product of the digits in the string s.
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
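
# A brute-force reference (my addition, not in the original): checks every
# 13-digit window directly, which is useful for sanity-checking the
# sliding-window shortcut in solution().
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))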
| 294
| 0
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        '''simple docstring'''
        logger.info('initializing retrieval')

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
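# Hypothetical wiring of the classes above (names as restored; the actor
# count and model id are examples only):
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       'facebook/rag-token-nq', actor_handles=workers)
#   retriever.init_retrieval()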
| 369
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
| 0
|
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = 'contact@muhammadumerfarooq.me'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        '''simple docstring'''
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        '''simple docstring'''
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(f'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
| 370
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
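# Example (names as restored above):
#   fizz_buzz(1, 7) -> '1 2 Fizz 4 Buzz Fizz 7 '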
| 294
| 0
|
'''simple docstring'''
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = ''.join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )
if __name__ == "__main__":
print(solution())
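# For reference: the seven digits picked out above are 1, 1, 5, 3, 7, 2 and 1,
# so the expected product is 210 (Project Euler problem 40).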
| 371
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        '''simple docstring'''
        merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        '''simple docstring'''
        return cls(**config)

    def get_config(self):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 350
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self, vocab_size=25_0002, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, initializer_range=0.02, pad_token_id=1, layer_norm_eps=1E-0_5, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
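# Hypothetical round-trip check of the config above (PretrainedConfig
# provides to_dict()/from_dict()); not runnable as a script because of the
# relative import at the top of the module:
#   config = ErnieMConfig(hidden_size=512, num_hidden_layers=6)
#   assert ErnieMConfig.from_dict(config.to_dict()).hidden_size == 512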
| 294
| 0
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowercase : int = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'RegNetConfig'
# Base docstring
__lowercase : Optional[Any] = 'facebook/regnet-y-040'
__lowercase : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
__lowercase : Tuple = 'facebook/regnet-y-040'
__lowercase : int = 'tabby, tabby cat'
__lowercase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
def __init__( self , __a , __a , __a = 3 , __a = 1 , __a = 1 , __a = "relu" , ):
'''simple docstring'''
super().__init__()
__a : Optional[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
__a : List[Any] = nn.BatchNormad(lowercase_ )
__a : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = self.convolution(lowercase_ )
__a : Dict = self.normalization(lowercase_ )
__a : Tuple = self.activation(lowercase_ )
return hidden_state
class RegNetEmbeddings(nn.Module):
def __init__( self , __a ):
'''simple docstring'''
super().__init__()
__a : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__a : Dict = config.num_channels
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__a : Optional[Any] = self.embedder(lowercase_ )
return hidden_state
class RegNetShortCut(nn.Module):
def __init__( self , __a , __a , __a = 2 ):
'''simple docstring'''
super().__init__()
__a : Tuple = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
__a : Optional[int] = nn.BatchNormad(lowercase_ )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[int] = self.convolution(lowercase_ )
__a : Optional[int] = self.normalization(lowercase_ )
return hidden_state
class RegNetSELayer(nn.Module):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) )
__a : List[Any] = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = self.pooler(lowercase_ )
__a : Union[str, Any] = self.attention(lowercase_ )
__a : Dict = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
def __init__( self , __a , __a , __a , __a = 1 ):
'''simple docstring'''
super().__init__()
__a : Optional[Any] = in_channels != out_channels or stride != 1
__a : List[str] = max(1 , out_channels // config.groups_width )
__a : Tuple = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
__a : Optional[int] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
__a : Optional[Any] = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Dict = hidden_state
__a : int = self.layer(lowercase_ )
__a : List[Any] = self.shortcut(lowercase_ )
hidden_state += residual
__a : Tuple = self.activation(lowercase_ )
return hidden_state
class RegNetYLayer(nn.Module):
def __init__( self , __a , __a , __a , __a = 1 ):
'''simple docstring'''
super().__init__()
__a : int = in_channels != out_channels or stride != 1
__a : Any = max(1 , out_channels // config.groups_width )
__a : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
__a : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
__a : int = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : int = hidden_state
__a : str = self.layer(lowercase_ )
__a : List[str] = self.shortcut(lowercase_ )
hidden_state += residual
__a : Optional[int] = self.activation(lowercase_ )
return hidden_state
class RegNetStage(nn.Module):
def __init__( self , __a , __a , __a , __a = 2 , __a = 2 , ):
'''simple docstring'''
super().__init__()
__a : Optional[Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
__a : Tuple = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = self.layers(lowercase_ )
return hidden_state
class RegNetEncoder(nn.Module):
def __init__( self , __a ):
'''simple docstring'''
super().__init__()
__a : Dict = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__a : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def __UpperCAmelCase ( self , __a , __a = False , __a = True ):
'''simple docstring'''
__a : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a : Optional[Any] = hidden_states + (hidden_state,)
__a : Union[str, Any] = stage_module(lowercase_ )
if output_hidden_states:
__a : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __UpperCAmelCase ( self , __a , __a=False ):
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
__a : Optional[Any] = value
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Dict = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(lowercase_ )
__a : int = config
__a : List[str] = RegNetEmbeddings(lowercase_ )
__a : Tuple = RegNetEncoder(lowercase_ )
__a : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a , __a = None , __a = None ):
'''simple docstring'''
__a : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__a : int = self.embedder(lowercase_ )
__a : Dict = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
__a : Optional[int] = encoder_outputs[0]
__a : Optional[int] = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(lowercase_ )
__a : Optional[int] = config.num_labels
__a : Optional[int] = RegNetModel(lowercase_ )
# classification head
__a : Union[str, Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
__a : List[str] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : Any = self.classifier(lowercase_ )
__a : Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = "single_label_classification"
else:
__a : int = "multi_label_classification"
if self.config.problem_type == "regression":
__a : Tuple = MSELoss()
if self.num_labels == 1:
__a : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
__a : str = CrossEntropyLoss()
__a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : List[str] = BCEWithLogitsLoss()
__a : List[Any] = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
__a : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 351
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : int = module
__a : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
__a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
return self.module(__a , *__a , **__a ) + self.adapter(__a )
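# Illustrative sketch (hypothetical shapes, not part of the test suite): the
# adapter above wraps a frozen linear module with a two-layer low-rank
# bottleneck and sums the two paths in forward(), i.e.
#     output = module(x) + adapter(x)
# A 64->64 base layer with rank 8 routes the trainable path through an
# 8-dimensional bottleneck; since the second adapter layer is zero-initialized,
# the wrapper starts out computing exactly the frozen module's output.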
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside the setUp function
    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
# Therefore here we use only bloom-1b3 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(__a ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(__a ):
            # Tries casting to float
self.model_abit.float()
with self.assertRaises(__a ):
            # Tries casting to half
self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__a : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__a : str = LoRALayer(module.q_proj , rank=16 )
__a : str = LoRALayer(module.k_proj , rank=16 )
__a : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a : int = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
| 0
|
'''simple docstring'''
__lowercase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602176634E-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_58_18,
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : float ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__a : Optional[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {", ".join(_snake_case )}"""
)
raise ValueError(_snake_case )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
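# Illustrative check (values read off the table above): converting through the
# joule base unit, lowerCamelCase('kilowatthour', 'joule', 1.0) returns
# 1.0 * 3_600_000 / 1.0 == 3600000.0.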
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = None
A_ = None
A_ = None
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Any = project_dim
__a : Optional[Any] = pooler_fn
__a : int = learn_encoder
__a : str = use_attention_mask
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = [r"pooler", r"logit_scale"]
A_ = [r"position_ids", r"predictions.decoder.bias"]
A_ = "roberta"
A_ = RobertaSeriesConfig
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[Any] = XLMRobertaModel(__a )
__a : str = nn.Linear(config.hidden_size , config.project_dim )
__a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a )
if self.has_pre_transformation:
__a : int = nn.Linear(config.hidden_size , config.project_dim )
__a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Tuple = self.base_model(
input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , )
if self.has_pre_transformation:
__a : Optional[Any] = outputs['hidden_states'][-2]
__a : Optional[int] = self.pre_LN(__a )
__a : Union[str, Any] = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__a : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
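    # Descriptive note on the branch above: when has_pre_transformation is set,
    # the projection is taken from the penultimate hidden state
    # (hidden_states[-2]) after a LayerNorm, so output_hidden_states is forced to
    # True for the base model call; otherwise the final hidden state is projected
    # directly.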
| 294
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
A_ = ["image_processor", "tokenizer"]
A_ = "CLIPImageProcessor"
A_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self , __a=None , __a=None , **__a ):
'''simple docstring'''
__a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case__ , )
__a : Tuple = kwargs.pop('feature_extractor' )
__a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , __a=None , __a=None , __a=None , **__a ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__a : Dict = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
__a : Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
__a : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.tokenizer.model_input_names
__a : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 353
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # the import structure above lists no fast tokenizer, so there is nothing extra to import
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 0
|
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__lowercase : Optional[Any] = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Dict = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase : int = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
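# Illustrative rename (assuming a typical checkpoint key): the rename function
# below applies each mapping as a substring replacement, so
# 'decoder.blocks.0.attn.query.weight' becomes
# 'decoder.layers.0.self_attn.q_proj.weight'.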
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
__a : Union[str, Any] = list(s_dict.keys() )
for key in keys:
__a : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__a : int = new_key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F"""{key} -> {new_key}""" )
__a : Optional[Any] = s_dict.pop(_SCREAMING_SNAKE_CASE )
return s_dict
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a , __a : Optional[Any] = emb.weight.shape
__a : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
__a : Tuple = emb.weight.data
return lin_layer
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
__a : List[str] = os.path.basename(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = url.split('/' )[-2]
__a : Any = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
__a : Optional[int] = open(_SCREAMING_SNAKE_CASE , 'rb' ).read()
if hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(_SCREAMING_SNAKE_CASE ) as source, open(_SCREAMING_SNAKE_CASE , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_SCREAMING_SNAKE_CASE , unit_divisor=1_024 ) as loop:
while True:
__a : List[str] = source.read(8_192 )
if not buffer:
break
output.write(_SCREAMING_SNAKE_CASE )
loop.update(len(_SCREAMING_SNAKE_CASE ) )
__a : int = open(_SCREAMING_SNAKE_CASE , 'rb' ).read()
if hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
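# Descriptive note on the helper above: it caches the checkpoint on disk and
# verifies the SHA-256 digest (taken from the second-to-last URL path segment)
# both before reusing a cached file and after a fresh download.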
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] ):
if ".pt" not in checkpoint_path:
__a : int = _download(_MODELS[checkpoint_path] )
else:
__a : str = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
__a : Tuple = original_checkpoint['dims']
__a : Optional[Any] = original_checkpoint['model_state_dict']
__a : str = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
rename_keys(_SCREAMING_SNAKE_CASE )
__a : List[str] = True
__a : Any = state_dict['decoder.layers.0.fc1.weight'].shape[0]
__a : Optional[Any] = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_SCREAMING_SNAKE_CASE , decoder_ffn_dim=_SCREAMING_SNAKE_CASE , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
__a : Any = WhisperForConditionalGeneration(_SCREAMING_SNAKE_CASE )
__a , __a : Dict = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
__a : int = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__a : List[str] = proj_out_weights
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowercase : List[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 354
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowercase : str = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'MobileNetV1Config'
# Base docstring
__lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
__lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
__lowercase : int = 'google/mobilenet_v1_1.0_224'
__lowercase : Any = 'tabby, tabby cat'
__lowercase : Dict = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
__a : Dict = {}
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[Any] = model.mobilenet_va
else:
__a : List[Any] = model
__a : Dict = 'MobilenetV1/Conv2d_0/'
__a : Dict = backbone.conv_stem.convolution.weight
__a : Optional[Any] = backbone.conv_stem.normalization.bias
__a : int = backbone.conv_stem.normalization.weight
__a : int = backbone.conv_stem.normalization.running_mean
__a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a : int = i + 1
__a : Dict = i * 2
__a : Dict = backbone.layer[pt_index]
__a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a : Union[str, Any] = pointer.convolution.weight
__a : Optional[Any] = pointer.normalization.bias
__a : Union[str, Any] = pointer.normalization.weight
__a : List[Any] = pointer.normalization.running_mean
__a : Tuple = pointer.normalization.running_var
__a : List[str] = backbone.layer[pt_index + 1]
__a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a : Optional[int] = pointer.convolution.weight
__a : List[str] = pointer.normalization.bias
__a : Dict = pointer.normalization.weight
__a : Dict = pointer.normalization.running_mean
__a : Optional[int] = pointer.normalization.running_var
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__a : Optional[int] = model.classifier.weight
__a : List[Any] = model.classifier.bias
return tf_to_pt_map
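# Descriptive note on the map above: it pairs TensorFlow checkpoint prefixes
# such as 'MobilenetV1/Conv2d_3_depthwise/' with the matching PyTorch
# parameters, so the loader below can copy weights purely by name.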
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = array
# Build TF to PyTorch weights loading map
__a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__a : Union[str, Any] = array.squeeze().transpose()
else:
__a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ):
__a , __a : Any = features.shape[-2:]
__a , __a : int = conv_layer.stride
__a , __a : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__a : int = max(kernel_height - stride_height , 0 )
else:
__a : int = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a : Any = max(kernel_width - stride_width , 0 )
else:
__a : str = max(kernel_width - (in_width % stride_width) , 0 )
__a : int = pad_along_width // 2
__a : Dict = pad_along_width - pad_left
__a : List[str] = pad_along_height // 2
__a : Union[str, Any] = pad_along_height - pad_top
__a : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 )
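# Worked example of the TF 'SAME' padding above (illustrative numbers): for a
# 7x7 input with kernel 3 and stride 2, in_height % stride_height == 1, so
# pad_along_height = max(3 - 1, 0) = 2, split as pad_top=1, pad_bottom=1.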
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a : Union[str, Any] = nn.Convad(
in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , )
if use_normalization:
__a : List[str] = nn.BatchNormad(
num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , )
else:
__a : Tuple = None
if use_activation:
if isinstance(__a , __a ):
__a : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __a ):
__a : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__a : Dict = config.hidden_act
else:
__a : List[Any] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config.tf_padding:
__a : Union[str, Any] = apply_tf_padding(__a , self.convolution )
__a : Union[str, Any] = self.convolution(__a )
if self.normalization is not None:
__a : str = self.normalization(__a )
if self.activation is not None:
__a : Optional[int] = self.activation(__a )
return features
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = MobileNetVaConfig
A_ = load_tf_weights_in_mobilenet_va
A_ = "mobilenet_v1"
A_ = "pixel_values"
A_ = False
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = True ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[int] = config
__a : str = 32
__a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a : Union[str, Any] = MobileNetVaConvLayer(
__a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , )
__a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a : Any = nn.ModuleList()
for i in range(13 ):
__a : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=1 , ) )
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
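    # Descriptive note on the loop above: it builds 13 depthwise-separable
    # blocks, each a depthwise 3x3 convolution (groups match the input channels
    # in MobileNetV1) followed by a pointwise 1x1 convolution; stride-2 blocks
    # (and the first iteration) double the channel depth.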
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__a : Union[str, Any] = self.conv_stem(__a )
__a : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a : List[str] = layer_module(__a )
if output_hidden_states:
__a : List[Any] = all_hidden_states + (hidden_states,)
__a : str = hidden_states
if self.pooler is not None:
__a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 )
else:
__a : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=__a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Tuple = config.num_labels
__a : Tuple = MobileNetVaModel(__a )
__a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a )
__a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : int = self.classifier(self.dropout(__a ) )
__a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = 'single_label_classification'
else:
__a : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__a : Optional[Any] = MSELoss()
if self.num_labels == 1:
__a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__a : List[str] = CrossEntropyLoss()
__a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Tuple = BCEWithLogitsLoss()
__a : Optional[int] = loss_fct(__a , __a )
if not return_dict:
__a : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
| 294
| 0
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowercase : str = logging.get_logger(__name__)
class __UpperCamelCase :
def __init__( self , __a = None , __a = None , __a=None , __a=None ):
'''simple docstring'''
if not conversation_id:
__a : Optional[int] = uuid.uuida()
if past_user_inputs is None:
__a : Any = []
if generated_responses is None:
__a : List[str] = []
__a : uuid.UUID = conversation_id
__a : List[str] = past_user_inputs
__a : List[str] = generated_responses
__a : Optional[str] = text
def __eq__( self , __a ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self , __a , __a = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
__a : Union[str, Any] = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__a : str = text
def __UpperCAmelCase ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__a : Optional[int] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.generated_responses.append(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
__a : List[Any] = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__a : Dict = """user""" if is_user else """bot"""
output += f"""{name} >> {text} \n"""
return output
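# Descriptive note on the container above: past user inputs and generated
# responses are kept in lockstep and yielded alternately by the iterator
# method; at most one unprocessed user input is held at a time, and adding a
# second one without overwrite=True logs a warning and keeps the old input.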
@add_end_docstrings(
_UpperCAmelCase , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , *__a , **__a ):
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.tokenizer.pad_token_id is None:
__a : str = self.tokenizer.eos_token
def __UpperCAmelCase ( self , __a=None , __a=None , __a=None , **__a ):
'''simple docstring'''
__a : Optional[Any] = {}
__a : Optional[Any] = {}
__a : Tuple = {}
if min_length_for_response is not None:
__a : Union[str, Any] = min_length_for_response
if minimum_tokens is not None:
__a : Tuple = minimum_tokens
if "max_length" in generate_kwargs:
__a : Optional[int] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__a : Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __a , __a=0 , **__a ):
'''simple docstring'''
__a : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , num_workers=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self , __a , __a=32 ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            raise ValueError('ConversationalPipeline expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
__a : int = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__a : Dict = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE_ )
if self.framework == "pt":
__a : List[str] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__a : Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self , __a , __a=10 , **__a ):
'''simple docstring'''
__a : Any = generate_kwargs.get('max_length' , self.model.config.max_length )
__a : Optional[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__a : Any = max_length - minimum_tokens
__a : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__a : str = model_inputs["""attention_mask"""][:, -trim:]
__a : Union[str, Any] = model_inputs.pop('conversation' )
__a : int = max_length
__a : int = self.model.generate(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.model.config.is_encoder_decoder:
__a : List[str] = 1
else:
__a : Optional[int] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self , __a , __a=True ):
'''simple docstring'''
__a : Dict = model_outputs["""output_ids"""]
__a : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , )
__a : Dict = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(SCREAMING_SNAKE_CASE_ )
return conversation
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Dict = self.tokenizer.eos_token_id
__a : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > self.tokenizer.model_max_length:
__a : Optional[int] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 355
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__lowercase : Optional[int] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 0
|