code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 176 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Find the line whose base**exponent value is greatest.

    Comparing x * log10(a) avoids computing the huge powers directly.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 176 | 1 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def _snake_case ( _snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for i in range(0 , _snake_case ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def _snake_case ( _snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for i in range(_snake_case , 0 , -1 ):
for _ in range(_snake_case , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def _snake_case ( _snake_case : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(_snake_case ) # upper half
reverse_floyd(_snake_case ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
a = 1
while K:
a = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
a = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 271 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
UpperCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowerCAmelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCAmelCase_ ( self : int ):
_A = super().to_dict()
for k, v in d.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = v.to_dict()
return d
| 271 | 1 |
"""Fetch posts from a subreddit via Reddit's public JSON API."""
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Return `limit` posts from `subreddit`, keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 70 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image from a random (3, 30, 400) uint8 array."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 259 | 0 |
from __future__ import annotations

import requests

valid_terms = set(
    "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)


def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 364 |
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR on two binary inputs."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 84 | 0 |
"""Implementation of a polynomial in a single indeterminate."""
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients should be in order of degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Add two polynomials, padding the shorter coefficient list."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant_of_integration: float = 0) -> Polynomial:
        """Return the antiderivative with the given constant of integration."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 250 |
"""simple docstring"""
from itertools import product
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
lowerCAmelCase__ : Union[str, Any] = sides_number
lowerCAmelCase__ : Optional[int] = max_face_number * dice_number
lowerCAmelCase__ : List[str] = [0] * (max_total + 1)
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : Optional[int] = range(__UpperCAmelCase , max_face_number + 1 )
for dice_numbers in product(__UpperCAmelCase , repeat=__UpperCAmelCase ):
lowerCAmelCase__ : str = sum(__UpperCAmelCase )
totals_frequencies[total] += 1
return totals_frequencies
def lowercase_ ( ) -> float:
lowerCAmelCase__ : Union[str, Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCAmelCase__ : Tuple = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 9
lowerCAmelCase__ : Tuple = 4 * 9
lowerCAmelCase__ : Optional[int] = 6
for peter_total in range(__UpperCAmelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCAmelCase__ : Tuple = (4**9) * (6**6)
lowerCAmelCase__ : Union[str, Any] = peter_wins_count / total_games_number
lowerCAmelCase__ : Optional[int] = round(__UpperCAmelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
| 353 |
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 10 | 0 |
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights into our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 19 |
"""Simple term frequency / inverse document frequency (tf-idf) helpers."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of `term` in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
| 151 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 352 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowercase ( __snake_case ) -> Dict:
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] ,unknown_args[1::2] )}
def _lowercase ( ) -> Union[str, Any]:
__lowerCAmelCase : List[str] = ArgumentParser(
"HuggingFace Datasets CLI tool" ,usage="datasets-cli <command> [<args>]" ,allow_abbrev=__snake_case )
__lowerCAmelCase : str = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__snake_case )
EnvironmentCommand.register_subcommand(__snake_case )
TestCommand.register_subcommand(__snake_case )
RunBeamCommand.register_subcommand(__snake_case )
DummyDataCommand.register_subcommand(__snake_case )
# Parse args
__lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_known_args()
if not hasattr(__snake_case ,"func" ):
parser.print_help()
exit(1 )
__lowerCAmelCase : List[Any] = parse_unknown_args(__snake_case )
# Run
__lowerCAmelCase : Union[str, Any] = args.func(__snake_case ,**__snake_case )
service.run()
if __name__ == "__main__":
main() | 58 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
@staticmethod
def UpperCamelCase_ ( *_lowerCamelCase , **_lowerCamelCase ):
pass
@is_pipeline_test
@require_vision
class a ( unittest.TestCase ):
@require_torch
def UpperCamelCase_ ( self ):
lowercase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase = image_classifier(_lowerCamelCase , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowerCamelCase ) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
lowercase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
] , )
@require_tf
def UpperCamelCase_ ( self ):
lowercase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase = image_classifier(_lowerCamelCase , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
lowercase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
[
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
{'score': 0.3_3_3, 'label': ANY(_lowerCamelCase )},
],
] , )
@slow
@require_torch
def UpperCamelCase_ ( self ):
lowercase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase = image_classifier(_lowerCamelCase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
lowercase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase_ ( self ):
lowercase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase = image_classifier(_lowerCamelCase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
lowercase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 220 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Optional[Any] = 3
class a ( a_ ):
pass
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
'''simple docstring'''
for shard in shards:
for i in range(__snake_case ):
yield {"i": i, "shard": shard}
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = int(os.environ['RANK'] )
lowercase = int(os.environ['WORLD_SIZE'] )
lowercase = ArgumentParser()
parser.add_argument('--streaming' , type=__snake_case )
parser.add_argument('--local_rank' , type=__snake_case )
parser.add_argument('--num_workers' , type=__snake_case , default=0 )
lowercase = parser.parse_args()
lowercase = args.streaming
lowercase = args.num_workers
lowercase = {'shards': [f'shard_{shard_idx}' for shard_idx in range(__snake_case )]}
lowercase = IterableDataset.from_generator(__snake_case , gen_kwargs=__snake_case )
if not streaming:
lowercase = Dataset.from_list(list(__snake_case ) )
lowercase = split_dataset_by_node(__snake_case , rank=__snake_case , world_size=__snake_case )
lowercase = torch.utils.data.DataLoader(__snake_case , num_workers=__snake_case )
lowercase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 220 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = FunnelTokenizer
lowercase__ = FunnelTokenizerFast
lowercase__ = True
lowercase__ = True
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
_snake_case : str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_snake_case : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self: Union[str, Any], **a_: Dict ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname, **_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self: Union[str, Any], **a_: Optional[int] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **_SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self: Dict, a_: List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = "UNwant\u00E9d,running"
_snake_case : int = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class(self.vocab_file )
_snake_case : Union[str, Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ), [7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
_snake_case : Dict = tokenizer("""UNwant\u00E9d,running""" )
_snake_case : List[str] = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""], [2] + [0] * sentence_len )
_snake_case : Dict = tokenizer("""UNwant\u00E9d,running""", """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""], [2] + [0] * sentence_len + [1] * sentence_len )
| 364 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
A_ = '''<<<<<<< This should probably be modified because it mentions: '''
A_ = '''=======
>>>>>>>
'''
A_ = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
A_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def UpperCAmelCase__ (snake_case__ : Namespace ):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase( __a ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
_snake_case : Tuple = parser.add_parser(
"""convert""", help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""", )
train_parser.add_argument(
"""--tfds_path""", type=a_, required=a_, help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""", )
train_parser.add_argument(
"""--datasets_directory""", type=a_, required=a_, help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=a_ )
def __init__( self: List[str], a_: str, a_: str, *a_: str ):
'''simple docstring'''
_snake_case : Optional[Any] = get_logger("""datasets-cli/converting""" )
_snake_case : Any = tfds_path
_snake_case : Optional[Any] = datasets_directory
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_snake_case : int = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_snake_case : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
_snake_case : Union[str, Any] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
_snake_case : Tuple = []
_snake_case : Dict = []
_snake_case : Optional[Any] = {}
if os.path.isdir(self._tfds_path ):
_snake_case : List[str] = os.listdir(a_ )
else:
_snake_case : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
_snake_case : Dict = os.path.join(a_, a_ )
_snake_case : Union[str, Any] = os.path.join(a_, a_ )
if not os.path.isfile(a_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(a_, encoding="""utf-8""" ) as f:
_snake_case : str = f.readlines()
_snake_case : List[str] = []
_snake_case : Any = False
_snake_case : Union[str, Any] = False
_snake_case : Optional[Any] = []
for line in lines:
_snake_case : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_snake_case : Optional[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
_snake_case : Optional[int] = """"""
continue
elif "from absl import logging" in out_line:
_snake_case : int = """from datasets import logging\n"""
elif "getLogger" in out_line:
_snake_case : Any = out_line.replace("""getLogger""", """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_snake_case : Union[str, Any] = True
_snake_case : Optional[Any] = list(filter(lambda a_ : e in out_line, a_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_ ) + """\n""" )
out_lines.append(a_ )
out_lines.append(a_ )
continue
else:
for pattern, replacement in TO_CONVERT:
_snake_case : List[str] = re.sub(a_, a_, a_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_snake_case : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""", a_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
_snake_case : Optional[Any] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_snake_case : Tuple = True
out_lines.append(a_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_snake_case : List[str] = f_name.replace(""".py""", """""" )
_snake_case : str = os.path.join(a_, a_ )
_snake_case : str = os.path.join(a_, a_ )
os.makedirs(a_, exist_ok=a_ )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(a_ )
if needs_manual_update:
with_manual_update.append(a_ )
with open(a_, """w""", encoding="""utf-8""" ) as f:
f.writelines(a_ )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
_snake_case : Optional[int] = os.path.basename(a_ )
_snake_case : Optional[Any] = imports_to_builder_map[f_name.replace(""".py""", """""" )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(a_, a_ )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 132 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=0.0_2 , )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def A__ ( self )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
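    # The two checks above share one idea: run the model once on the full sequence, then
    # run it incrementally through init_cache/past_key_values and compare the final
    # logits. A minimal sketch of the pattern (names assumed from this tester):
    #
    #   past  = model.init_cache(batch_size, max_len)
    #   step1 = model(input_ids[:, :-1], past_key_values=past, ...)
    #   step2 = model(input_ids[:, -1:], past_key_values=step1.past_key_values, ...)
    #   full  = model(input_ids)
    #   np.max(np.abs(step2[0][:, -1] - full[0][:, -1]))  # should be ~0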
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 328 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
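# A hedged usage sketch (script name and paths here are illustrative, not fixed):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin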
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 293 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase_ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowerCamelCase_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowerCamelCase_ = re.compile(r"^\s*try:")
# Catches a line with else:
lowerCamelCase_ = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse the _import_structure and TYPE_CHECKING objects defined in an __init__.py, per backend."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(obj_list):
        return [k for k, v in collections.Counter(obj_list).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
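# A quick sanity check of the behavior (hypothetical toy inputs):
#   analyze_results({"none": ["A"]}, {"none": ["A", "B"]})
#   -> ["Differences for base imports:", "  B in TYPE_HINT but not in _import_structure."]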
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 360 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
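# Worked example for the tree built in main() below: with scores
# [90, 23, 6, 33, 21, 65, 123, 34423], the max level picks (90, 33, 65, 34423) from the
# leaf pairs, the min level reduces those to (33, 65), and the root (a max node)
# returns 65.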
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 239 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""PoolFormerFeatureExtractor"""]
UpperCamelCase__ = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
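# With sys.modules patched this way, `from ...poolformer import PoolFormerModel` only
# triggers the actual import of modeling_poolformer lazily, on first attribute access.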
| 92 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 92 | 1 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
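# Hedged invocation sketch (script name, runner names, and token are placeholders):
#   python check_runner_status.py --target_runners runner-a,runner-b --token $GITHUB_TOKEN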
| 131 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
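# Minimal instantiation sketch (values mirror the defaults above):
#   config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=0)
#   config.num_hidden_layers  # -> 10, resolved through attribute_map to num_layers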
| 131 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        self.assertEqual(len(sortish_dl), len(naive_dl))
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 345 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 345 | 1 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
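# Example of the mapping in action: "decoder.blocks.0.mlp.0.weight" first matches
# "blocks" -> "layers", then "mlp.0" -> "fc1", ending up as "decoder.layers.0.fc1.weight".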
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
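# Reusing emb.weight.data in a bias-free Linear ties the LM head to the token embedding,
# so logits are effectively hidden_states @ embedding_matrix.T.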
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download root "." is an assumption added here so the one-argument
    # call in convert_openai_whisper_to_tfms below works; adjust as needed.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
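# Note the checksum convention: the expected SHA256 is the second-to-last URL path
# segment (url.split("/")[-2]), which matches how the _MODELS URLs above are built.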
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # head count; "n_text_state" would be the hidden size
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__a = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 354 |
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
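# Worked example: binary_and(5, 3) compares "101" with "011" bit by bit and returns
# "0b001"; binary_and(25, 32) zero-fills both to six bits and returns "0b000000".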
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 17 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
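# Each reader above is wrapped in @get_duration (from utils), so a call like
# read_batch(dataset, length=SPEED_TEST_N_EXAMPLES, batch_size=100) is assumed to
# return the elapsed wall-clock time for the sweep rather than the rows it touched.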
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 271 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 271 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 365 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase : Tuple = logging.getLogger(__name__)
def snake_case__ ( __lowerCamelCase : torch.nn.Module , __lowerCamelCase : BnbQuantizationConfig , __lowerCamelCase : Union[str, os.PathLike] = None , __lowerCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : str =bnb_quantization_config.load_in_abit
lowerCamelCase__ : str =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCamelCase__ : str =[]
# custom device map
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1:
lowerCamelCase__ : Union[str, Any] =[key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase__ : Any =get_keys_to_not_convert(__lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCamelCase )
lowerCamelCase__ : Tuple =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : List[Any] =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCamelCase )
# compatibility with peft
lowerCamelCase__ : List[str] =load_in_abit
lowerCamelCase__ : List[str] =load_in_abit
lowerCamelCase__ : Union[str, Any] =get_parameter_device(__lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCamelCase__ : str =replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
# convert param to the right dtype
lowerCamelCase__ : Union[str, Any] =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCamelCase__ : Optional[int] =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCamelCase__ : Dict =getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
''' We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowerCamelCase__ : Dict =replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
lowerCamelCase__ : Optional[int] =get_quantized_model_device_map(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , )
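# when part of the model is dispatched to disk, its state dict has to be offloaded as well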
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase__ : List[str] =True
lowerCamelCase__ : Dict =any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase__ : List[Any] ={'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
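# modules that skip quantization keep the configured torch_dtype, and "keep in full precision" modules stay in float when inferring the device map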
lowerCamelCase__ : List[Any] ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCamelCase__ : int ={}
lowerCamelCase__ : Optional[int] =special_dtypes
lowerCamelCase__ : List[str] =no_split_module_classes
lowerCamelCase__ : Tuple =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase__ : List[str] =get_balanced_memory(
__lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , )
lowerCamelCase__ : str =max_memory
lowerCamelCase__ : Any =infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# check if don't have any quantized module on the cpu
lowerCamelCase__ : List[str] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase__ : List[str] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None ):
"""simple docstring"""
if modules_to_not_convert is None:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ , lowerCamelCase__ : List[Any] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Tuple =False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__ : Optional[Any] =[]
current_key_name.append(__lowerCamelCase )
if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase__ : Optional[Any] ='''.'''.join(__lowerCamelCase )
lowerCamelCase__ : Tuple =True
for key in modules_to_not_convert:
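# a key matches when it names this module exactly or a parent component of its dotted path (key followed by ".")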
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase__ : Any =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCamelCase__ : List[str] =bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCamelCase__ : str =bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCamelCase__ : Any =module.weight.data
if module.bias is not None:
lowerCamelCase__ : Any =module.bias.data
bnb_module.requires_grad_(__lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str =True
if len(list(module.children() ) ) > 0:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Any =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
lowerCamelCase__ : Optional[Any] =deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager
lowerCamelCase__ : Union[str, Any] =find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : List[str] =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__ : Any =sum(__lowerCamelCase , [] )
lowerCamelCase__ : Any =len(__lowerCamelCase ) > 0
# Check if it is a base model
lowerCamelCase__ : Optional[Any] =False
if hasattr(__lowerCamelCase , '''base_model_prefix''' ):
lowerCamelCase__ : Dict =not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__ : List[str] =list(model.named_children() )
lowerCamelCase__ : Any =[list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__ : Optional[Any] =set(__lowerCamelCase ) - set(__lowerCamelCase )
lowerCamelCase__ : List[str] =list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowerCamelCase__ : Optional[Any] =['''.weight''', '''.bias''']
lowerCamelCase__ : List[Any] =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__ : Union[str, Any] =name.replace(__lowerCamelCase , '''''' )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
for m in model.modules():
if isinstance(__lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def snake_case__ ( __lowerCamelCase : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =param_name
lowerCamelCase__ : Dict =model
if "." in tensor_name:
lowerCamelCase__ : Optional[int] =tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase__ : Union[str, Any] =getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowerCamelCase__ : Union[str, Any] =new_module
lowerCamelCase__ : List[Any] =splits[-1]
# offload weights
lowerCamelCase__ : Optional[Any] =False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
| 272 | 0 |
'''simple docstring'''
def snake_case_ (_a : list ):
def merge(_a : list , _a : list ) -> list:
def _merge():
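# lazily yield the smaller of the two heads until one half is exhausted, then drain the rest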
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(_a ) <= 1:
return collection
UpperCAmelCase = len(_a ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
A =input('Enter numbers separated by a comma:\n').strip()
A =[int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 34 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = "data2vec-text"
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Dict = vocab_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :int = num_hidden_layers
lowerCAmelCase_ :List[Any] = num_attention_heads
lowerCAmelCase_ :Any = hidden_act
lowerCAmelCase_ :Optional[int] = intermediate_size
lowerCAmelCase_ :str = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :str = max_position_embeddings
lowerCAmelCase_ :int = type_vocab_size
lowerCAmelCase_ :Tuple = initializer_range
lowerCAmelCase_ :List[Any] = layer_norm_eps
lowerCAmelCase_ :List[Any] = position_embedding_type
lowerCAmelCase_ :List[Any] = use_cache
lowerCAmelCase_ :List[Any] = classifier_dropout
class _SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
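# multiple-choice inputs carry an extra "choice" axis between the batch and sequence axes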
if self.task == "multiple-choice":
lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 84 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[int]=0 ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = np.random.RandomState(_A )
__lowercase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : List[str] = self.get_dummy_inputs()
__lowercase : Optional[Any] = pipe(**_A ).images
__lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Any = self.get_dummy_inputs()
__lowercase : Any = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : str = self.get_dummy_inputs()
__lowercase : str = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : int = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : str = self.get_dummy_inputs()
__lowercase : Any = pipe(**_A ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Optional[Any] = self.get_dummy_inputs()
__lowercase : Tuple = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Optional[Any] = self.get_dummy_inputs()
__lowercase : List[Any] = pipe(**_A ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : int = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = self.get_dummy_inputs()
__lowercase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
__lowercase : Union[str, Any] = pipe(**_A )
__lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
__lowercase : Union[str, Any] = self.get_dummy_inputs()
__lowercase : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
__lowercase : str = pipe.tokenizer(
_A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_A , return_tensors="""np""" , )
__lowercase : int = text_inputs["""input_ids"""]
__lowercase : Optional[int] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase : List[Any] = prompt_embeds
# forward
__lowercase : int = pipe(**_A )
__lowercase : List[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = self.get_dummy_inputs()
__lowercase : Dict = 3 * ["""this is a negative prompt"""]
__lowercase : str = negative_prompt
__lowercase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
__lowercase : Any = pipe(**_A )
__lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
__lowercase : str = self.get_dummy_inputs()
__lowercase : Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
__lowercase : str = []
for p in [prompt, negative_prompt]:
__lowercase : int = pipe.tokenizer(
_A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_A , return_tensors="""np""" , )
__lowercase : Any = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase : int = embeds
# forward
__lowercase : List[str] = pipe(**_A )
__lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = ort.SessionOptions()
__lowercase : List[str] = False
return options
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Union[str, Any] = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase : List[str] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase : Dict = output.images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : List[str] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = """open neural network exchange"""
__lowercase : List[str] = np.random.RandomState(0 )
__lowercase : Union[str, Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type="""np""" )
__lowercase : Union[str, Any] = output.images
__lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : Optional[int] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Union[str, Any] = """open neural network exchange"""
__lowercase : Optional[Any] = np.random.RandomState(0 )
__lowercase : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type="""np""" )
__lowercase : Tuple = output.images
__lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : str = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = 0
def test_callback_fn(__a : Dict , __a : Dict , __a : Dict ) -> None:
__lowercase : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase : List[str] = latents[0, -3:, -3:, -1]
__lowercase : Any = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase : Dict = latents[0, -3:, -3:, -1]
__lowercase : Union[str, Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
__lowercase : Dict = False
__lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Any = """Andromeda galaxy in a bottle"""
__lowercase : str = np.random.RandomState(0 )
pipe(
prompt=_A , num_inference_steps=5 , guidance_scale=7.5 , generator=_A , callback=_A , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_A , _A )
assert pipe.safety_checker is None
__lowercase : List[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A )
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(_A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 364 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = "▁"
lowerCamelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase : Dict = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
lowerCamelCase : Any = {
"google/pegasus-xsum": 512,
}
class A( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PegasusTokenizer
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , A_ : List[Any]=None , A_ : int=None , A_ : Optional[int]="<pad>" , A_ : str="</s>" , A_ : str="<unk>" , A_ : int="<mask_2>" , A_ : Any="<mask_1>" , A_ : int=None , A_ : int=103 , **A_ : Dict , ) -> Any:
"""simple docstring"""
lowerCamelCase_ = offset
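# regular vocabulary ids are shifted up by `offset` to leave room for the special tokens defined here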
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is"""
f""" {type(UpperCAmelCase_ )}""" )
lowerCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(UpperCAmelCase_ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowerCamelCase_ = additional_special_tokens_extended
else:
lowerCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
def a__ ( self : Tuple , A_ : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def a__ ( self : str , A_ : List , A_ : Optional[List] = None , A_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def a__ ( self : int , A_ : List[Any] , A_ : str=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a__ ( self : List[str] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
| 204 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
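# Viterbi recurrence: prob(state, o) = max over k of prob(k, o - 1) * transition[k][state] * emission[state][observation]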
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | 0 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def a ( lowerCamelCase__ ):
'''simple docstring'''
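# Möbius function: 0 if n has a squared prime factor, otherwise (-1)**k where k is the number of prime factors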
A_ : List[Any] = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :List[Any] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'camembert'
def __init__(self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ):
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : List[Any] = vocab_size
A_ : int = hidden_size
A_ : Dict = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Optional[Any] = hidden_act
A_ : str = intermediate_size
A_ : int = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : int = initializer_range
A_ : str = layer_norm_eps
A_ : int = position_embedding_type
A_ : Dict = use_cache
A_ : Any = classifier_dropout
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
def _a (self ):
if self.task == "multiple-choice":
A_ : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 135 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = """▁"""
lowercase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowercase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowercase__ = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class __lowerCamelCase ( snake_case_ ):
'''simple docstring'''
a_ : str = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , a_ : Optional[Any] , a_ : Optional[int]="<s>" , a_ : Dict="</s>" , a_ : Optional[Any]="</s>" , a_ : Union[str, Any]="<s>" , a_ : Tuple="<unk>" , a_ : List[str]="<pad>" , a_ : List[str]="<mask>" , a_ : Tuple = None , **a_ : str , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase_ : Tuple = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a_ ) )
lowerCAmelCase_ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCAmelCase_ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : Any = len(self.sp_model ) + self.fairseq_offset
lowerCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ):
lowerCAmelCase_ : Dict = self.__dict__.copy()
lowerCAmelCase_ : int = None
lowerCAmelCase_ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , a_ : Dict ):
lowerCAmelCase_ : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase ( self : Optional[int] , a_ : List[Any] , a_ : Optional[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase ( self : List[Any] , a_ : Union[str, Any] , a_ : List[Any] = None , a_ : Optional[Any] = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def lowerCamelCase ( self : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] = None ):
lowerCAmelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase ( self : str ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase ( self : Optional[Any] , a_ : str ):
return self.sp_model.encode(a_ , out_type=a_ )
def lowerCamelCase ( self : Optional[int] , a_ : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ : Tuple = self.sp_model.PieceToId(a_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase ( self : List[str] , a_ : Optional[int] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase ( self : Tuple , a_ : List[Any] ):
lowerCAmelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def lowerCamelCase ( self : Union[str, Any] , a_ : Optional[Any] , a_ : int = None ):
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : List[str] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 241 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = """Usage of script: script_name <size_of_canvas:int>"""
lowercase_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCamelCase ( __lowerCamelCase : int ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = [[False for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )]
return canvas
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->None:
for i, row in enumerate(__lowerCamelCase ):
for j, _ in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = bool(random.getrandbits(1 ) )
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCamelCase ):
for c, pt in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = __judge_point(
__lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_SCREAMING_SNAKE_CASE = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_SCREAMING_SNAKE_CASE = current_canvas.tolist()
return return_canvas
def lowerCamelCase ( __lowerCamelCase : bool , __lowerCamelCase : list[list[bool]] ) ->bool:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
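# Conway's rules: a live cell survives with exactly 2 or 3 live neighbours,
# a dead cell comes alive with exactly 3 live neighbours, everything else dies or stays dead.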
_SCREAMING_SNAKE_CASE = pt
if pt:
if alive < 2:
_SCREAMING_SNAKE_CASE = False
elif alive == 2 or alive == 3:
_SCREAMING_SNAKE_CASE = True
elif alive > 3:
_SCREAMING_SNAKE_CASE = False
else:
if alive == 3:
_SCREAMING_SNAKE_CASE = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["""w""", """k"""])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 58 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase : Optional[int] ={
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =["MobileViTFeatureExtractor"]
_lowercase : Any =["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] =[
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =[
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
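# Added usage sketch: with the lazy structure above, lightweight symbols such
# as the config resolve without importing torch or TF; touching a model class
# is what triggers the heavy submodule import. Guarded so nothing runs on import.
if __name__ == "__main__":
    import transformers

    config = transformers.MobileViTConfig()  # resolved lazily, no torch needed yet
    print(config.model_type)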
| 266 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
_lowercase : Dict =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
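# Added usage note: a typical invocation of this conversion script (the paths
# below are illustrative placeholders, not real checkpoints):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/transfo_xl_checkpoint \
#       --transfo_xl_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output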
| 266 | 1 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
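# Added sketch of the round trip the tests above exercise, outside pytest
# (the sqlite path, output path and table name are illustrative):
def _demo_sql_round_trip(sqlite_path: str, out_path: str) -> None:
    ds = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path).read()
    SqlDatasetWriter(ds, "dataset", "sqlite:///" + out_path, num_proc=1).write()
    for row1, row2 in zip(iter_sql_file(sqlite_path), iter_sql_file(out_path)):
        assert row1 == row2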
| 33 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 132 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 305 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
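# Added sketch: check_models_equal above compares flattened parameter trees,
# so a freshly built model is trivially equal to itself.
def _demo_check_models_equal() -> None:
    config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
    model = FlaxBertModel(config)
    assert check_models_equal(model, model)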
| 305 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
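# Added sketch: extract_commit_hash only recognises the hub cache layout, where
# the revision sits in a ``snapshots/<40-hex-char hash>/`` path component.
def _demo_extract_commit_hash() -> None:
    resolved = "/cache/models--foo/snapshots/0123456789abcdef0123456789abcdef01234567/config.json"
    assert extract_commit_hash(resolved) == "0123456789abcdef0123456789abcdef01234567"
    assert extract_commit_hash("/tmp/config.json") is None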
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_snake_case = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
_snake_case = 0
else:
with open(cache_version_file) as f:
try:
_snake_case = int(f.read())
except ValueError:
_snake_case = 0
if cache_version < 1:
_snake_case = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
_snake_case = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
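# Added sketch: _add_variant splices the variant in front of the final
# extension, which is how fp16/ema weight filenames are derived.
def _demo_add_variant() -> None:
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"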
def _get_model_file(
    pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies,
    resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                    resume_download=resume_download, local_files_only=local_files_only,
                    use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent,
                subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"""this model name. Check the model page at """
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
| 26 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowercase : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowercase : List[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowercase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 239 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Matrix , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Matrix ) -> Dict:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Matrix ) -> str:
"""simple docstring"""
if location := find_empty_location(UpperCAmelCase_ ):
UpperCamelCase , UpperCamelCase :int = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase :Any = digit
if sudoku(UpperCAmelCase_ ) is not None:
return grid
UpperCamelCase :Tuple = 0
return None
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
for row in grid:
for cell in row:
print(UpperCAmelCase_ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
UpperCAmelCase_ : List[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
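# Added check for the helper above: a digit clashes when it already appears
# in the same row, column or 3x3 box. Uses its own grid so nothing here
# depends on the (mutable) example grids.
def _demo_is_safe() -> None:
    grid = [[0] * 9 for _ in range(9)]
    grid[1][1] = 2
    assert not is_safe(grid, 0, 1, 2)  # 2 already in column 1 (and in the box)
    assert is_safe(grid, 0, 1, 3)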
| 352 |
from math import pi
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int ) -> float:
"""simple docstring"""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
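# Added worked example: a 90 degree arc of a circle with radius 10 is a quarter
# of the circumference, 2 * pi * 10 / 4 = 5 * pi ~= 15.71.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9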
| 62 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
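# Added usage sketch (interactive, so left commented; requires a real terminal
# and the accelerate input machinery to supply handle_input/current_selection):
#
#   menu = BulletMenu("Pick a backend:", ["pytorch", "tensorflow", "flax"])
#   selected = menu.run(default_choice=0)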
| 131 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
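# Added usage sketch for the tree above: prefix sums, point updates and
# rank queries over a small example array.
def _demo_fenwick() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3          # sum of arr[0:3]
    assert tree.query(1, 4) == 2 + 3 + 4        # sum of arr[1:4]
    assert tree.rank_query(3) == 1              # largest i with prefix(i + 1) <= 3
    tree.update(2, 10)                          # arr becomes [1, 2, 10, 4, 5]
    assert tree.get(2) == 10
    assert tree.get_array() == [1, 2, 10, 4, 5]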
| 131 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 144 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _A ( ) -> None:
'''simple docstring'''
print("Making key files...")
make_key_files("rsa", 1024)
print("Key files generation successful.")
def _A ( UpperCamelCase_ : int) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
print("Generating prime q...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
__lowercase = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
while True:
__lowercase = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
if cryptoMath.gcd(UpperCamelCase_, (p - 1) * (q - 1)) == 1:
break
print("Calculating d that is mod inverse of e...")
__lowercase = cryptoMath.find_mod_inverse(UpperCamelCase_, (p - 1) * (q - 1))
__lowercase = (n, e)
__lowercase = (n, d)
return (public_key, private_key)
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : int) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
print("\nWARNING:")
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program.")
sys.exit()
__lowercase ,__lowercase = generate_key(UpperCamelCase_)
print(F"""\nWriting public key to file {name}_pubkey.txt...""")
with open(F"""{name}_pubkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""")
print(F"""Writing private key to file {name}_privkey.txt...""")
with open(F"""{name}_privkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""")
if __name__ == "__main__":
main()
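# Added usage sketch: generate_key can also be used directly when no key files
# are wanted. 1024 bits keeps the demo quick; use >= 2048 bits in practice.
def _demo_generate_key() -> None:
    public_key, private_key = generate_key(1024)
    n, e = public_key
    _, d = private_key
    message = 42
    assert pow(pow(message, e, n), d, n) == message  # encrypt then decrypt round-trips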
| 144 | 1 |
def A ( a_ ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def A ( a_ ) -> bool:
__UpperCamelCase : Union[str, Any] =0
__UpperCamelCase : Optional[int] =number
while duplicate > 0:
__UpperCamelCase , __UpperCamelCase : Any =divmod(a_ ,10 )
fact_sum += factorial(a_ )
return fact_sum == number
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
A_ :Tuple = int(input('''Enter number: ''').strip())
print(
f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
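# Added worked example: 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 gives 1 + 24 + 24 = 49.
assert krishnamurthy(145) and not krishnamurthy(144)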
| 71 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Any) -> List[str]:
'''simple docstring'''
__lowercase ,__lowercase = [], []
while len(UpperCamelCase_) > 1:
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
start.append(UpperCamelCase_)
end.append(UpperCamelCase_)
collection.remove(UpperCamelCase_)
collection.remove(UpperCamelCase_)
end.reverse()
return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
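# Added check: each pass peels the current minimum and maximum off the ends,
# so the result is fully sorted.
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]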
| 17 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32,
        num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02,
        rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True,
        rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings, **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 187 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 187 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase__ ( __lowercase ):
a__ : List[Any] = """levit"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_24 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[4, 8, 12] , SCREAMING_SNAKE_CASE__ : Tuple=[4, 4, 4] , SCREAMING_SNAKE_CASE__ : Optional[int]=[16, 16, 16] , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE__ : int=0.02 , **SCREAMING_SNAKE_CASE__ : str , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = kernel_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = hidden_sizes
__lowerCamelCase = num_attention_heads
__lowerCamelCase = depths
__lowerCamelCase = key_dim
__lowerCamelCase = drop_path_rate
__lowerCamelCase = patch_size
__lowerCamelCase = attention_ratio
__lowerCamelCase = mlp_ratio
__lowerCamelCase = initializer_range
__lowerCamelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCAmelCase__ ( OnnxConfig ):
a__ : str = version.parse("""1.11""" )
@property
def __A ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A ( self : str ) -> float:
return 1e-4
| 270 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
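# A fraction such as 49/98 is "digit cancelling" because naively crossing out
# the shared digit 9 leaves 4/8, which happens to equal the original value.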
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list( digit_len : int ) -> list[str]:
solutions = []
den = 11
last_digit = int('''1''' + '''0''' * digit_len )
for num in range(den , last_digit ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(num , den ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
den = 10
return solutions
def solution( digit_len : int = 2 ) -> int:
result = 1.0
for fraction in fraction_list(digit_len ):
frac = Fraction(fraction )
result *= frac.denominator / frac.numerator
return int(result )
if __name__ == "__main__":
print(solution())
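# For reference: the four non-trivial two-digit digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the
# printed answer should be 100.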
| 270 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
"""simple docstring"""
@staticmethod
def open( *args , **kwargs ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,)
_a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Any = image_classifier(_lowerCAmelCase ,candidate_labels=['a', 'b', 'c'] )
# The floating-point scores are so close that rounding error makes the
# ordering non-deterministic across Python and torch versions.
self.assertIn(
nested_simplify(_lowerCAmelCase ) ,[
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] ,)
_a : Any = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
] ,)
@require_tf
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,framework='tf' )
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Union[str, Any] = image_classifier(_lowerCAmelCase ,candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] ,)
_a : str = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
{'score': 0.333, 'label': ANY(_lowerCAmelCase )},
],
] ,)
@slow
@require_torch
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = pipeline(
task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,)
# This is an image of 2 cats with remotes and no planes
_a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Tuple = image_classifier(_lowerCAmelCase ,candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] ,)
_a : Any = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 ,)
@slow
@require_tf
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = pipeline(
task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,framework='tf' )
# This is an image of 2 cats with remotes and no planes
_a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Union[str, Any] = image_classifier(_lowerCAmelCase ,candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] ,)
_a : List[Any] = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) ,[
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 ,)
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array ):
"""simple docstring"""
n = len(array )
matrix = [[0 for x in range(n )] for x in range(n )]
sol = [[0 for x in range(n )] for x in range(n )]
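# matrix[a][b] will hold the minimum number of scalar multiplications needed
# to compute the chain product A_a ... A_b, and sol[a][b] the split point c
# that achieves it (the standard matrix-chain dynamic programme).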
for chain_length in range(2 , n ):
for a in range(1 , n - chain_length + 1 ):
b = a + chain_length - 1
matrix[a][b] = sys.maxsize
for c in range(a , b ):
cost = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
matrix[a][b] = cost
sol[a][b] = c
return matrix, sol
def print_optimal_solution(optimal_solution , i , j ):
"""simple docstring"""
if i == j:
print('A' + str(i ) , end=' ' )
else:
print('(' , end=' ' )
print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
print(')' , end=' ' )
def main():
"""simple docstring"""
array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
n = len(array )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
matrix, sol = matrix_chain_order(array )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optimal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
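# With the classic CLRS example above, the minimum is 15125 scalar
# multiplications, achieved by the parenthesisation ((A1(A2A3))((A4A5)A6)).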
| 5 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : List[str] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
'''simple docstring'''
model = EsmModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
'''simple docstring'''
model = EsmForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = EsmForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
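# ESM position ids start at padding_idx + 1 so that padding_idx itself stays
# reserved for padding tokens (mirroring fairseq's make_positions behaviour).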
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306 | 0 |
def calc_profit( profit: list , weight: list , max_weight: int ) -> float:
'''simple docstring'''
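# Greedy fractional knapsack: repeatedly take the item with the highest
# profit/weight ratio, splitting the final item if only part of it fits.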
if len(_lowercase ) != len(_lowercase ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
profit_by_weight = [p / w for p, w in zip(profit , weight )]
# Creating a copy of the list and sorting profit/weight in ascending order
sorted_profit_by_weight = sorted(profit_by_weight )
# declaring useful variables
length = len(sorted_profit_by_weight )
limit = 0
gain = 0
i = 0
# loop until the total weight reaches max_weight or every item is considered
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
index = profit_by_weight.index(biggest_profit_by_weight )
profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Add the whole item's profit, since weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
__A = [int(x) for x in input('Input profits separated by spaces: ').split()]
__A = [int(x) for x in input('Input weights separated by spaces: ').split()]
__A = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 75 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def dummy_uncond_unet( self ):
torch.manual_seed(0 )
model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def test_inference( self ):
unet = self.dummy_uncond_unet
scheduler = ScoreSdeVeScheduler()
sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=generator ).images
generator = torch.manual_seed(0 )
image_from_tuple = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=generator , return_dict=False )[
0
]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def test_inference( self ):
model_id = '''google/ncsnpp-church-256'''
model = UNet2DModel.from_pretrained(model_id )
scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=generator ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 75 | 1 |
"""simple docstring"""
def compute_ap(l ) -> None: # noqa: E741
'''simple docstring'''
n = len(l )
out_edge_count = 0
low = [0] * n
visited = [False] * n
is_art = [False] * n
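# Tarjan-style DFS: low[at] records the smallest vertex id reachable from the
# subtree rooted at `at` using at most one back edge; a vertex is an
# articulation point when some child's subtree cannot climb above it.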
def dfs(root , at , parent , out_edge_count ):
if parent == root:
out_edge_count += 1
visited[at] = True
low[at] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
out_edge_count = dfs(root , to , at , out_edge_count )
low[at] = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
is_art[at] = True
# AP found via cycle
if at == low[to]:
is_art[at] = True
else:
low[at] = min(low[at] , to )
return out_edge_count
for i in range(n ):
if not visited[i]:
out_edge_count = 0
out_edge_count = dfs(i , i , -1 , out_edge_count )
is_art[i] = out_edge_count > 1
for x in range(len(is_art ) ):
if is_art[x] is True:
print(x )
# Adjacency list of graph
__A = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 135 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ) -> Tuple:
'''simple docstring'''
files = {}
if train_file is not None:
files[datasets.Split.TRAIN] = [train_file]
if eval_file is not None:
files[datasets.Split.VALIDATION] = [eval_file]
if test_file is not None:
files[datasets.Split.TEST] = [test_file]
ds = datasets.load_dataset("csv" , data_files=files )
features_name = list(ds[list(files.keys() )[0]].features.keys() )
label_name = features_name.pop(label_column_id )
label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
label2id = {label: i for i, label in enumerate(label_list )}
input_names = tokenizer.model_input_names
transformed_ds = {}
if len(features_name ) == 1:
for k in files.keys():
transformed_ds[k] = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="max_length" ) , batched=True , )
elif len(features_name ) == 2:
for k in files.keys():
transformed_ds[k] = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="max_length" , ) , batched=True , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
d = {k: v for k, v in ex.items() if k in input_names}
label = label2id[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
d = {k: v for k, v in ex.items() if k in input_names}
label = label2id[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
d = {k: v for k, v in ex.items() if k in input_names}
label = label2id[ex[label_name]]
yield (d, label)
train_ds = (
tf.data.Dataset.from_generator(
gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
val_ds = (
tf.data.Dataset.from_generator(
gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
test_ds = (
tf.data.Dataset.from_generator(
gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, label2id
__A = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
label_column_id: int = field(metadata={"help": "Which column contains the label"} )
train_file: Optional[str] = field(default=None , metadata={"help": "The path of the training file"} )
dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
max_seq_length: int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
overwrite_cache: bool = field(
default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
config_name: Optional[str] = field(
default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
tokenizer_name: Optional[str] = field(
default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def lowercase_ ( ) -> str:
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
train_dataset, eval_dataset, test_ds, label2id = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
model = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
def compute_metrics(p: EvalPrediction ) -> Dict:
preds = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
trainer = TFTrainer(
model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(result )
return results
if __name__ == "__main__":
main()
| 135 | 1 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def http_user_agent( user_agent: Union[Dict, str, None] = None ) -> str:
lowerCAmelCase = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
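# e.g. "diffusers/0.19.0; python/3.10.12; session_id/<hex>; torch/2.0.1"
# (illustrative only; the exact versions depend on the environment)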
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent , dict ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(user_agent , str ):
ua += "; " + user_agent
return ua
def get_full_repo_name( model_id: str , organization: Optional[str] = None , token: Optional[str] = None ):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token )['name']
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def create_model_card( args , model_name ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(args , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
hub_token = args.hub_token if hasattr(args , 'hub_token' ) else None
repo_name = get_full_repo_name(model_name , token=hub_token )
model_card = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(args , 'gradient_accumulation_steps' ) else None
) , adam_beta1=args.adam_beta1 if hasattr(args , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(args , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
card_path = os.path.join(args.output_dir , 'README.md' )
model_card.save(card_path )
def extract_commit_hash( resolved_file: Optional[str] , commit_hash: Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file ).as_posix() )
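# hf_hub caches downloads under .../snapshots/<commit_hash>/<filename>, so the
# commit hash can be recovered from that path segment.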
search = re.search(R'snapshots/([^/]+)/' , resolved_file )
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
def move_cache( old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ):
if new_cache_dir is None:
new_cache_dir = DIFFUSERS_CACHE
if old_cache_dir is None:
old_cache_dir = old_diffusers_cache
old_cache_dir = Path(old_cache_dir ).expanduser()
new_cache_dir = Path(new_cache_dir ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
new_blob_path.parent.mkdir(parents=True , exist_ok=True )
os.replace(old_blob_path , new_blob_path )
try:
os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
cache_version = 0
else:
with open(cache_version_file) as f:
try:
cache_version = int(f.read())
except ValueError:
cache_version = 0
if cache_version < 1:
old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
trace = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _add_variant( weights_name: str , variant: Optional[str] = None ):
if variant is not None:
splits = weights_name.split('.' )
splits = splits[:-1] + [variant] + splits[-1:]
weights_name = '.'.join(splits )
return weights_name
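# e.g. _add_variant('diffusion_pytorch_model.bin', 'fp16')
# -> 'diffusion_pytorch_model.fp16.bin'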
def _get_model_file( pretrained_model_name_or_path , *, weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
pretrained_model_name_or_path = str(pretrained_model_name_or_path )
if os.path.isfile(pretrained_model_name_or_path ):
return pretrained_model_name_or_path
elif os.path.isdir(pretrained_model_name_or_path ):
if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
# Load from a PyTorch checkpoint
model_file = os.path.join(pretrained_model_name_or_path , weights_name )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__version__ ).base_version ) >= version.parse('0.20.0' )
):
try:
model_file = hf_hub_download(
pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.''' , FutureWarning , )
try:
# 2. Load model file as usual
model_file = hf_hub_download(
pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
| 55 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
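# Shannon entropy of softmax(x), computed row-wise:
# H = log Z - (1/Z) * sum_i x_i * exp(x_i), where Z = sum_i exp(x_i).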
exp_x = torch.exp(x )
A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
B = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(A ) - B / A
class DeeBertEncoder( nn.Module ):
def __init__( self : int , UpperCAmelCase__ : int ) -> str:
super().__init__()
lowerCAmelCase = config.output_attentions
lowerCAmelCase = config.output_hidden_states
lowerCAmelCase = nn.ModuleList([BertLayer(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase = nn.ModuleList([BertHighway(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> int:
if (type(UpperCAmelCase__ ) is float) or (type(UpperCAmelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowerCAmelCase = x
else:
lowerCAmelCase = x
def init_highway_pooler( self , pooler ):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
lowerCAmelCase = ()
lowerCAmelCase = ()
lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowerCAmelCase = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
hidden_states = layer_outputs[0]
if self.output_attentions:
lowerCAmelCase = all_attentions + (layer_outputs[1],)
lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase = current_outputs + (all_attentions,)
lowerCAmelCase = self.highway[i](UpperCAmelCase__ )
# logits, pooled_output
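# During inference, each highway head's prediction entropy gauges its
# confidence; a sufficiently low entropy triggers an early exit, implemented
# as an exception that the classifier catches further up the stack.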
if not self.training:
lowerCAmelCase = highway_exit[0]
lowerCAmelCase = entropy(UpperCAmelCase__ )
lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCAmelCase__ , i + 1 )
else:
lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowerCAmelCase = all_hidden_states + (hidden_states,)
lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase = outputs + (all_attentions,)
lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> str:
super().__init__(UpperCAmelCase__ )
lowerCAmelCase = config
lowerCAmelCase = BertEmbeddings(UpperCAmelCase__ )
lowerCAmelCase = DeeBertEncoder(UpperCAmelCase__ )
lowerCAmelCase = BertPooler(UpperCAmelCase__ )
self.init_weights()
def __UpperCAmelCase ( self : Any ) -> int:
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Dict ) -> List[Any]:
lowerCAmelCase = value
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Dict:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if encoder_attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
lowerCAmelCase = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCAmelCase = encoder_attention_mask[:, None, None, :]
lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
lowerCAmelCase = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
lowerCAmelCase = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = encoder_outputs[0]
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
def __init__( self , message , exit_layer ):
self.message = message
self.exit_layer = exit_layer # start from 1!
class BertHighway( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
super().__init__()
lowerCAmelCase = BertPooler(UpperCAmelCase__ )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict ) -> Optional[int]:
# Pooler
lowerCAmelCase = encoder_outputs[0]
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
# "return" pooler_output
# BertModel
lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCAmelCase = bmodel_output[1]
lowerCAmelCase = self.dropout(UpperCAmelCase__ )
lowerCAmelCase = self.classifier(UpperCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
def __init__( self : Dict , UpperCAmelCase__ : Dict ) -> Any:
super().__init__(UpperCAmelCase__ )
lowerCAmelCase = config.num_labels
lowerCAmelCase = config.num_hidden_layers
lowerCAmelCase = DeeBertModel(UpperCAmelCase__ )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=-1 , UpperCAmelCase__ : Optional[Any]=False , ) -> Dict:
lowerCAmelCase = self.num_layers
try:
lowerCAmelCase = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase = outputs[1]
lowerCAmelCase = self.dropout(UpperCAmelCase__ )
lowerCAmelCase = self.classifier(UpperCAmelCase__ )
lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase = e.message
lowerCAmelCase = e.exit_layer
lowerCAmelCase = outputs[0]
if not self.training:
lowerCAmelCase = entropy(UpperCAmelCase__ )
lowerCAmelCase = []
lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase = []
for highway_exit in outputs[-1]:
lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase__ )
if train_highway:
lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase = (loss,) + outputs
if not self.training:
lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
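# A minimal inference sketch for the early-exit classifier above (illustrative
# only: class names in this dump are obfuscated, so the upstream names
# `DeeBertForSequenceClassification` and `BertTokenizer`, and the checkpoint
# path, are assumptions rather than taken from this file):
#
#     import torch
#     from transformers import BertTokenizer
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     model = DeeBertForSequenceClassification.from_pretrained("<deebert-checkpoint>")
#     model.eval()  # entropy-based early exit is only evaluated outside training
#     encoded = tokenizer("Early exit saves compute.", return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(encoded["input_ids"], attention_mask=encoded["attention_mask"])
#     logits = outputs[0]  # first element of the tuple assembled in forward above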
| 55 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Dict = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
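# With this lazy setup, importing a symbol listed in `_import_structure` only
# loads the backing module on first attribute access, e.g. (illustrative):
#
#     from transformers.models.biogpt import BioGptForCausalLM  # resolved lazily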
| 258 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''encodec'''
def __init__(self : List[Any] , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : List[str]=2_4000 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Dict=128 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : int=[8, 5, 4, 2] , _lowerCAmelCase : Any="weight_norm" , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : str="reflect" , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : Tuple=1024 , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=True , **_lowerCAmelCase : List[str] , ):
A = target_bandwidths
A = sampling_rate
A = audio_channels
A = normalize
A = chunk_length_s
A = overlap
A = hidden_size
A = num_filters
A = num_residual_layers
A = upsampling_ratios
A = norm_type
A = kernel_size
A = last_kernel_size
A = residual_kernel_size
A = dilation_growth_rate
A = use_causal_conv
A = pad_mode
A = compress
A = num_lstm_layers
A = trim_right_ratio
A = codebook_size
A = codebook_dim if codebook_dim is not None else hidden_size
A = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_lowerCAmelCase )
@property
def A (self : List[Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A (self : Union[str, Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A (self : Any ):
A = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def A (self : List[str] ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
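# Worked example of the derived properties above as standalone arithmetic
# (the chunking values are assumed for illustration, since the signature
# defaults them to None; the sampling rate and upsampling ratios are the
# signature defaults):
example_sampling_rate = 24_000
example_upsampling_ratios = [8, 5, 4, 2]
example_chunk_length_s, example_overlap = 1.0, 0.01  # assumed values
example_chunk_length = int(example_chunk_length_s * example_sampling_rate)  # 24_000 samples
example_chunk_stride = max(1, int((1.0 - example_overlap) * example_chunk_length))  # 23_760 samples
example_hop_length = np.prod(example_upsampling_ratios)  # 8 * 5 * 4 * 2 = 320
example_frame_rate = math.ceil(example_sampling_rate / example_hop_length)  # 75 frames per second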
| 258 | 1 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = 'T5Config'
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "mt5"
lowerCAmelCase : List[Any] = MTaConfig
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "mt5"
lowerCAmelCase : str = MTaConfig
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = "mt5"
lowerCAmelCase : List[Any] = MTaConfig
| 302 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
set_seed(7_7_0)
A : int = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
A : int = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
A : Dict = os.path.dirname(os.path.abspath(__file__))
A : Dict = os.path.join(os.path.expanduser('~'), '.cache')
A : str = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> int:
"""simple docstring"""
lowercase__ = model_type
if use_small:
key += "_small"
return os.path.join(__magic_name__ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Any ) -> int:
"""simple docstring"""
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
hf_hub_download(repo_id=__magic_name__ , filename=__magic_name__ , local_dir=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str=False , __magic_name__ : str="text" ) -> int:
"""simple docstring"""
if model_type == "text":
lowercase__ = BarkSemanticModel
lowercase__ = BarkSemanticConfig
lowercase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase__ = BarkCoarseModel
lowercase__ = BarkCoarseConfig
lowercase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase__ = BarkFineModel
lowercase__ = BarkFineConfig
lowercase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase__ = f'''{model_type}_small''' if use_small else model_type
lowercase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__magic_name__ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowercase__ = torch.load(__magic_name__ , map_location=__magic_name__ )
# this is a hack
lowercase__ = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowercase__ = model_args["""vocab_size"""]
lowercase__ = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase__ = model_args.pop("""n_head""" )
lowercase__ = model_args.pop("""n_embd""" )
lowercase__ = model_args.pop("""n_layer""" )
lowercase__ = ConfigClass(**checkpoint["""model_args"""] )
lowercase__ = ModelClass(config=__magic_name__ )
lowercase__ = GenerationConfigClass()
lowercase__ = model_generation_config
lowercase__ = checkpoint["""model"""]
# fixup checkpoint
lowercase__ = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__magic_name__ ):
# replace part of the key with corresponding layer name in HF implementation
lowercase__ = k[len(__magic_name__ ) :]
for old_layer_name in new_layer_name_dict:
lowercase__ = new_k.replace(__magic_name__ , new_layer_name_dict[old_layer_name] )
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase__ = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowercase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase__ = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__magic_name__ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(__magic_name__ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(__magic_name__ , strict=__magic_name__ )
lowercase__ = model.num_parameters(exclude_embeddings=__magic_name__ )
lowercase__ = checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(__magic_name__ , 3 )} loss''' )
model.eval()
model.to(__magic_name__ )
del checkpoint, state_dict
return model
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : str=False , __magic_name__ : int="text" ) -> Dict:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase__ = """cpu""" # do conversion on cpu
lowercase__ = _get_ckpt_path(__magic_name__ , use_small=__magic_name__ )
lowercase__ = _load_model(__magic_name__ , __magic_name__ , model_type=__magic_name__ , use_small=__magic_name__ )
# load bark initial model
lowercase__ = _bark_load_model(__magic_name__ , """cpu""" , model_type=__magic_name__ , use_small=__magic_name__ )
if model_type == "text":
lowercase__ = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__magic_name__ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
lowercase__ = 5
lowercase__ = 10
if model_type in ["text", "coarse"]:
lowercase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowercase__ = bark_model(__magic_name__ )[0]
lowercase__ = model(__magic_name__ )
# take last logits
lowercase__ = output_new_model_total.logits[:, [-1], :]
else:
lowercase__ = 3
lowercase__ = 8
lowercase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase__ = model(__magic_name__ , __magic_name__ )
lowercase__ = bark_model(__magic_name__ , __magic_name__ )
lowercase__ = output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , ) -> Any:
"""simple docstring"""
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
lowercase__ = BarkSemanticConfig.from_pretrained(os.path.join(__magic_name__ , """config.json""" ) )
lowercase__ = BarkCoarseConfig.from_pretrained(os.path.join(__magic_name__ , """config.json""" ) )
lowercase__ = BarkFineConfig.from_pretrained(os.path.join(__magic_name__ , """config.json""" ) )
lowercase__ = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowercase__ = BarkSemanticModel.from_pretrained(__magic_name__ )
lowercase__ = BarkCoarseModel.from_pretrained(__magic_name__ )
lowercase__ = BarkFineModel.from_pretrained(__magic_name__ )
lowercase__ = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowercase__ = BarkConfig.from_sub_model_configs(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase__ = BarkModel(__magic_name__ )
lowercase__ = semantic
lowercase__ = coarseAcoustic
lowercase__ = fineAcoustic
lowercase__ = codec
lowercase__ = bark_generation_config
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
bark.save_pretrained(__magic_name__ , repo_id=__magic_name__ , push_to_hub=__magic_name__ )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
A : int = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
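# Example invocation (the script filename and output path are hypothetical;
# `model_type` must be one of text / coarse / fine, per `load_model` above):
#
#     python convert_suno_to_hf.py text ./bark-text-hf --is_small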
| 305 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = tmp_path / """file.csv"""
lowercase__ = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
@pytest.fixture
def UpperCamelCase ( __magic_name__ : str ) -> Tuple:
"""simple docstring"""
lowercase__ = tmp_path / """malformed_file.csv"""
lowercase__ = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
@pytest.fixture
def UpperCamelCase ( __magic_name__ : List[Any] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
lowercase__ = tmp_path / """csv_with_image.csv"""
lowercase__ = textwrap.dedent(
f'''\
image
{image_file}
''' )
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
@pytest.fixture
def UpperCamelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = tmp_path / """csv_with_label.csv"""
lowercase__ = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
@pytest.fixture
def UpperCamelCase ( __magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = tmp_path / """csv_with_int_list.csv"""
lowercase__ = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(__magic_name__ , """w""" ) as f:
f.write(__magic_name__ )
return str(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = Csv()
lowercase__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(__magic_name__ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(__magic_name__ ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(__magic_name__ , encoding="""utf-8""" ) as f:
lowercase__ = f.read().splitlines()[1]
lowercase__ = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
lowercase__ = csv._generate_tables([[csv_file_with_image]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
lowercase__ = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> str:
"""simple docstring"""
with open(__magic_name__ , encoding="""utf-8""" ) as f:
lowercase__ = f.read().splitlines()[1:]
lowercase__ = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
lowercase__ = csv._generate_tables([[csv_file_with_label]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
lowercase__ = pa_table.to_pydict()["""label"""]
assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def UpperCamelCase ( __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
lowercase__ = csv._generate_tables([[csv_file_with_int_list]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
lowercase__ = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
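# Standalone sketch of the `converters` mechanism the last test exercises; the
# Csv builder passes `converters` through to `pandas.read_csv`, so the same
# lambda works directly in pandas (illustrative, no fixtures needed):
import pandas as pd
from io import StringIO
example_csv = "int_list\n1 2 3\n4 5 6\n"
example_df = pd.read_csv(StringIO(example_csv), converters={"int_list": lambda x: [int(i) for i in x.split()]})
assert example_df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]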
| 305 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _UpperCamelCase ( UpperCamelCase_ : List[Any] ) -> str:
"""simple docstring"""
if "model" in orig_key:
lowerCAmelCase__ = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
lowerCAmelCase__ = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
lowerCAmelCase__ = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
lowerCAmelCase__ = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
lowerCAmelCase__ = orig_key.split('.' )[0].split('_' )[-1]
lowerCAmelCase__ = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
lowerCAmelCase__ = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
lowerCAmelCase__ = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
lowerCAmelCase__ = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
lowerCAmelCase__ = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
lowerCAmelCase__ = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
lowerCAmelCase__ = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
lowerCAmelCase__ = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
lowerCAmelCase__ = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
lowerCAmelCase__ = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
lowerCAmelCase__ = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
lowerCAmelCase__ = 'yoso.' + orig_key
return orig_key
def _UpperCamelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = orig_state_dict['cls.predictions.decoder.bias']
lowerCAmelCase__ = torch.arange(UpperCamelCase_ ).expand((1, -1) ) + 2
return orig_state_dict
def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location='cpu' )['model_state_dict']
lowerCAmelCase__ = YosoConfig.from_json_file(UpperCamelCase_ )
lowerCAmelCase__ = YosoForMaskedLM(UpperCamelCase_ )
lowerCAmelCase__ = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase_ )
print(model.load_state_dict(UpperCamelCase_ ) )
model.eval()
model.save_pretrained(UpperCamelCase_ )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case : Dict = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
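# Example invocation (the script filename and all paths are hypothetical):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path yoso.ckpt \
#         --config_file yoso_config.json \
#         --pytorch_dump_path ./yoso-hf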
| 122 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__snake_case : Optional[Any] = TypeVar("""KEY""")
__snake_case : str = TypeVar("""VAL""")
@dataclass(frozen=__lowercase , slots=__lowercase)
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL]):
_SCREAMING_SNAKE_CASE : KEY
_SCREAMING_SNAKE_CASE : VAL
class __SCREAMING_SNAKE_CASE ( _Item):
def __init__( self ):
"""simple docstring"""
super().__init__(None , None )  # a deleted slot carries no key or value
def __bool__( self ):
"""simple docstring"""
return False
__snake_case : int = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL]):
def __init__( self , _UpperCamelCase = 8 , _UpperCamelCase = 0.75 ):
"""simple docstring"""
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return hash(_UpperCamelCase ) % len(self._buckets )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(_UpperCamelCase , _UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(_UpperCamelCase , _UpperCamelCase )
return True
else:
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self._get_bucket_index(_UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
if self._try_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
break
def __setitem__( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(_UpperCamelCase , _UpperCamelCase )
def __delitem__( self , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(_UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _UpperCamelCase ):
"""simple docstring"""
for ind in self._iterate_buckets(_UpperCamelCase ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_UpperCamelCase )
def __len__( self ):
"""simple docstring"""
return self._len
def __iter__( self ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
"""simple docstring"""
lowerCAmelCase__ = ', '.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 122 | 1 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase =[
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
__lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
__lowerCamelCase = 1
__lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
__lowerCamelCase = init[0]
__lowerCamelCase = init[1]
__lowerCamelCase = 0
__lowerCamelCase = g + heuristic[x][y] # cost from starting cell to destination cell
__lowerCamelCase = [[f, g, x, y]]
__lowerCamelCase = False # flag that is set when search is complete
__lowerCamelCase = False # flag set if we cannot expand any further
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__lowerCamelCase = cell.pop()
__lowerCamelCase = next_cell[2]
__lowerCamelCase = next_cell[3]
__lowerCamelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
__lowerCamelCase = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
__lowerCamelCase = x + DIRECTIONS[i][0]
__lowerCamelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__lowerCamelCase = g + cost
__lowerCamelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__lowerCamelCase = 1
__lowerCamelCase = i
__lowerCamelCase = []
__lowerCamelCase = goal[0]
__lowerCamelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__lowerCamelCase = x - DIRECTIONS[action[x][y]][0]
__lowerCamelCase = y - DIRECTIONS[action[x][y]][1]
__lowerCamelCase = xa
__lowerCamelCase = ya
invpath.append([x, y] )
__lowerCamelCase = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCAmelCase =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCAmelCase =[0, 0]
# all coordinates are given in format [y,x]
__UpperCAmelCase =[len(grid) - 1, len(grid[0]) - 1]
__UpperCAmelCase =1
# the cost map which pushes the path closer to the goal
__UpperCAmelCase =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCAmelCase =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCAmelCase =9_9
__UpperCAmelCase , __UpperCAmelCase =search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 67 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ):
__UpperCamelCase =1
__UpperCamelCase =0
__UpperCamelCase =1
__UpperCamelCase =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = (DEISMultistepScheduler,)
snake_case__ : Union[str, Any] = (('num_inference_steps', 25),)
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**__lowerCAmelCase )
return config
def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A__ ( self ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , """set_timesteps""" ):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A__ ( self ):
"""simple docstring"""
lowercase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowercase = self.full_loop(scheduler=__lowerCAmelCase )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase = DEISMultistepScheduler.from_config(scheduler.config )
lowercase = self.full_loop(scheduler=__lowerCAmelCase )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , algorithm_type="""deis""" , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , )
lowercase = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def A__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=__lowerCAmelCase )
self.check_over_configs(lower_order_final=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop(prediction_type="""v_prediction""" )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
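# Standalone sketch of the scheduler-interchange pattern the tests above rely
# on: the multistep schedulers share a config schema, so `from_config` can
# round-trip between them (real diffusers API; the timestep count is just an
# example value):
#
#     from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler
#     deis = DEISMultistepScheduler(num_train_timesteps=1000)
#     unipc = UniPCMultistepScheduler.from_config(deis.config)
#     deis_roundtrip = DEISMultistepScheduler.from_config(unipc.config)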
| 353 | """simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
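# Usage sketch (the reader class above is obfuscated; upstream it is
# `SparkDatasetReader`, and the SparkSession setup is an assumption):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, cache_dir="/tmp/ds_cache").read()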
| 32 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 144 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A__ : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : List[str]=16 , snake_case__ : Tuple=13 , snake_case__ : Dict=7 , snake_case__ : List[Any]=14 , snake_case__ : List[Any]=10 , snake_case__ : Dict=19 , snake_case__ : List[str]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : str=True , snake_case__ : int=16 , snake_case__ : Union[str, Any]=2 , snake_case__ : Tuple=4 , snake_case__ : Dict=4 , snake_case__ : int="gelu" , snake_case__ : Dict=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=[1, 2, 3, 4, 5] , snake_case__ : Optional[int]=25 , snake_case__ : Dict=5 , ):
lowerCamelCase_ : Dict =d_model
lowerCamelCase_ : int =parent
lowerCamelCase_ : Optional[Any] =batch_size
lowerCamelCase_ : int =prediction_length
lowerCamelCase_ : Optional[int] =context_length
lowerCamelCase_ : Any =cardinality
lowerCamelCase_ : List[str] =num_time_features
lowerCamelCase_ : List[Any] =lags_sequence
lowerCamelCase_ : Optional[int] =embedding_dimension
lowerCamelCase_ : Union[str, Any] =is_training
lowerCamelCase_ : Union[str, Any] =hidden_size
lowerCamelCase_ : str =num_hidden_layers
lowerCamelCase_ : Any =num_attention_heads
lowerCamelCase_ : Any =intermediate_size
lowerCamelCase_ : Union[str, Any] =hidden_act
lowerCamelCase_ : Optional[int] =hidden_dropout_prob
lowerCamelCase_ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase_ : List[Any] =context_length
lowerCamelCase_ : str =prediction_length + label_length
lowerCamelCase_ : int =label_length
lowerCamelCase_ : Union[str, Any] =moving_average
lowerCamelCase_ : str =autocorrelation_factor
def UpperCAmelCase__ ( self : Any ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Any] ):
lowerCamelCase_ : Optional[Any] =config.context_length + max(config.lags_sequence )
lowerCamelCase_ : Any =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ : List[Any] =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ : Any =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ : Tuple =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ : Optional[Any] =floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ : Any ={
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =self.get_config()
lowerCamelCase_ : List[Any] =self.prepare_autoformer_inputs_dict(snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : List[str] =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : str =AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval()
lowerCamelCase_ : int =model(**snake_case__ )
lowerCamelCase_ : str =outputs.encoder_last_hidden_state
lowerCamelCase_ : Optional[Any] =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : Tuple =model.get_encoder()
encoder.save_pretrained(snake_case__ )
lowerCamelCase_ : Any =AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =model.create_network_inputs(**snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ : Dict =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ : int =encoder(inputs_embeds=snake_case__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCamelCase_ : str =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ : Optional[int] =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ : Any =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ : Optional[Any] =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : List[str] =model.get_decoder()
decoder.save_pretrained(snake_case__ )
lowerCamelCase_ : str =AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ : List[str] =decoder(
trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCAmelCase :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCAmelCase :Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :int = False
_UpperCAmelCase :int = False
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Dict = False
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : List[str] =AutoformerModelTester(self )
lowerCamelCase_ : List[str] =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : str =model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertEqual(info["missing_keys"] , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Any =inspect.signature(getattr(AutoformerModel , "forward" ) )
# The main input is the name of the argument after `self`
lowerCamelCase_ : Optional[Any] =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
lowerCamelCase_ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[Any] =[
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : List[str] =getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCamelCase_ : Dict =getattr(self.model_tester , "decoder_seq_length" , snake_case__ )
lowerCamelCase_ : List[Any] =getattr(self.model_tester , "encoder_seq_length" , snake_case__ )
lowerCamelCase_ : Optional[Any] =getattr(self.model_tester , "d_model" , snake_case__ )
lowerCamelCase_ : List[str] =getattr(self.model_tester , "num_attention_heads" , snake_case__ )
lowerCamelCase_ : Union[str, Any] =d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase_ : str =True
lowerCamelCase_ : int =False
lowerCamelCase_ : Any =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : str =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : List[Any] =True
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : List[str] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase_ : Optional[Any] =len(snake_case__ )
lowerCamelCase_ : List[Any] =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case__ , snake_case__ )
# decoder attentions
lowerCamelCase_ : Union[str, Any] =outputs.decoder_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase_ : Tuple =outputs.cross_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase_ : Tuple =True
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Dict =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 2 , len(snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase__ ( self : Optional[int] ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : int =AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : List[str] =prepare_batch()
with torch.no_grad():
lowerCamelCase_ : List[Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowerCamelCase_ : Union[str, Any] =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Dict =torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Optional[int] =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowerCamelCase_ : List[Any] =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Optional[Any] =torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : int =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Dict =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowerCamelCase_ : Tuple =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case__ )
lowerCamelCase_ : List[str] =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case__ )
lowerCamelCase_ : Any =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
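# Note on the integration checks above (added remark): `outputs.sequences` holds
# `num_parallel_samples` sampled trajectories per series and `mean_prediction`
# averages them, so the loose rtol=1e-1 tolerance absorbs Monte Carlo noise in
# the sampled forecasts.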
| 144 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
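# Added remark: with four `block_out_channels` entries the loop above creates
# three stride-2 convolutions, so the full-resolution conditioning image is
# downsampled by 8x to match the latent `sample` it is added to in
# FlaxControlNetModel.__call__ below (init_weights uses sample_size * 8).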
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
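# Added remark: `conditioning_scale` linearly scales every residual handed to
# the UNet in step 6 above, so 0.0 disables the control signal entirely and
# 1.0 applies it at full strength.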
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
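# Illustrative expansion (added): for a torch-only class such as `UNet2DModel`,
# the DUMMY_CLASS template above renders roughly
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
# so importing the name always works, and a helpful error is raised only when
# the object is actually used without the backend installed.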
| 6 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = MgpstrTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = {}
lowerCAmelCase_ = False
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# fmt: off
snake_case_ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowercase ) + "\n" )
def snake_case__ ( self : Union[str, Any] , **__lowercase : List[Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def snake_case__ ( self : int , __lowercase : Optional[int] ):
"""simple docstring"""
snake_case_ = "tester"
snake_case_ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
snake_case_ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ = tokenizer.encode([special_token] , add_special_tokens=__lowercase )
self.assertEqual(len(__lowercase ) , 1 )
snake_case_ = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
snake_case_ , snake_case_ = self.get_input_output_texts(__lowercase )
snake_case_ = tokenizer.tokenize(__lowercase )
snake_case_ = tokenizer.convert_tokens_to_ids(__lowercase )
snake_case_ = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
snake_case_ = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertNotEqual(len(__lowercase ) , 0 )
snake_case_ = tokenizer.decode(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(text_a.replace(" " , "" ) , __lowercase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self : Any ):
"""simple docstring"""
pass
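# Note (added): the vocabulary built in setUp is purely character-level
# ([GO], [s], digits, lowercase letters), which is why the string "tester"
# round-trips through tokenize/encode/decode unchanged in the tests above.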
| 187 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(
    input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
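if __name__ == "__main__":
    # Tiny smoke test of the metric helpers above (added; not in the upstream
    # file). normalize_answer strips case, punctuation and articles before
    # comparison, so "The Cat." and "the cat" count as an exact match.
    assert exact_match_score("The Cat.", "the cat")
    assert calculate_exact_match(["the cat", "a dog"], ["the cat!", "a bird"]) == {"em": 0.5}
    print(f1_score("new york city", "york city"))  # -> 0.8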
| 187 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
    main()
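    # Added sanity check (not in the original script): both variants should
    # agree with math.gcd on small inputs.
    import math
    assert all(
        euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)
        for a in range(1, 30)
        for b in range(1, 30)
    )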
| 365 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
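# Illustrative usage (added; the checkpoint name is hypothetical):
#   processor = AltCLIPProcessor.from_pretrained("some/altclip-checkpoint")
#   inputs = processor(text=["a photo"], images=image, return_tensors="pt")
# Passing both text and images returns the text encoding with `pixel_values`
# attached, mirroring the branching in __call__ above.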
| 65 | 0 |
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
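if __name__ == "__main__":
    # Illustrative check (added): "Wikipedia" is a well-known Adler-32 test
    # vector, and the result should match zlib's reference implementation.
    import zlib
    data = "Wikipedia"
    assert adler32(data) == zlib.adler32(data.encode("utf-8")) == 0x11E60398
    print(hex(adler32(data)))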
| 48 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
    import doctest
    doctest.testmod()
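    # Illustrative runs (added): a string that is itself a palindrome must be
    # returned whole, and "abbbaba" contains "abbba" as its longest palindrome.
    assert palindromic_string("ababa") == "ababa"
    print(palindromic_string("abbbaba"))  # -> "abbba"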
| 5 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
__lowercase = OpenAIGPTTokenizer
__lowercase = OpenAIGPTTokenizerFast
__lowercase = True
__lowercase = False
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
A__ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
A__ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(lowercase_ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(lowercase_ ) )
def UpperCAmelCase_ ( self :Dict , lowercase_ :str )-> Optional[int]:
return "lower newer", "lower newer"
def UpperCAmelCase_ ( self :str )-> List[Any]:
A__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
A__ = "lower"
A__ = ["low", "er</w>"]
A__ = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
A__ = tokens + ["<unk>"]
A__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
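    # Note (added): with the toy merges above ("l o", "lo w", "e r</w>"),
    # "lower" is greedily merged into "low" + "er</w>", mirroring how OpenAI
    # GPT's BPE applies learned merge rules before mapping tokens to ids.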
def UpperCAmelCase_ ( self :Any , lowercase_ :Optional[Any]=15 )-> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
A__ = "This is a simple input"
A__ = ["This is a simple input 1", "This is a simple input 2"]
A__ = ("This is a simple input", "This is a pair")
A__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCAmelCase_ ( self :Union[str, Any] )-> Dict:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( UpperCamelCase__ ):
pass
| 123 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """
    Count the reduced proper fractions n/d with d <= limit, i.e. the sum of
    Euler's totient over 2..limit, using a prime sieve and the product formula.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    print(f"{solution() = }")
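    # Known small case from Project Euler problem 72 (added check): for a
    # limit of 8 there are 21 reduced proper fractions.
    assert solution(8) == 21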
| 123 | 1 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search: returns the path from init to goal and the action grid."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
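    # Added cross-check: each `action` entry stores the DIRECTIONS index used
    # to reach that cell, so the backtracked path can be replayed forward.
    cursor = list(init)
    for step in path[1:]:
        move = DIRECTIONS[action[step[0]][step[1]]]
        cursor = [cursor[0] + move[0], cursor[1] + move[1]]
        assert cursor == step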
| 75 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
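# Illustrative instantiation (added; the values shown are just the defaults
# above):
#   config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
# Each stage i runs depths[i] blocks at width hidden_sizes[i], downsampling
# between stages wherever downsamples[i] is True.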
| 75 | 1 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by comparing squared Euclidean distances."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
            return 0 if d0 > d1 else 1
        return 0
    def update(self, weights, sample, j, alpha) -> list[list[int | float]]:
        """Update the winning vector, pulling it toward the sample by alpha."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
    main()
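    # Quick illustration (added, not in the original): with alpha=1.0 a single
    # update moves a weight row exactly onto the sample.
    som = SelfOrganizingMap()
    w = som.update([[0.0, 0.0], [9.0, 9.0]], [1, 2], 0, 1.0)
    assert w[0] == [1.0, 2.0]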
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4_096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1_024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ) -> None:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
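# Sketch of intended usage (added; attribute aliases come from attribute_map
# above):
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#   config.num_attention_heads  # -> 1, aliased to n_head
# The GPT-2 style fields (n_layer, n_head, n_positions) configure the causal
# transformer that consumes interleaved (return, state, action) tokens.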
| 92 | 1 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using circle sort."""
    if len(collection) < 2:
        return collection
    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
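    # Property check (added): circle sort must agree with sorted() on random
    # data.
    import random
    sample_data = random.sample(range(100), 20)
    assert circle_sort(list(sample_data)) == sorted(sample_data)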
| 55 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableUnCLIPPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 32
lowerCamelCase_ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase_ = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
lowerCamelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
lowerCamelCase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL()
lowerCamelCase_ = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def snake_case ( self , UpperCamelCase , UpperCamelCase=0 ):
"""simple docstring"""
if str(UpperCamelCase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowerCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ = pipe("anime turle" , generator=UpperCamelCase , output_type="np" )
lowerCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
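# Note on the offloading test above (added): enable_sequential_cpu_offload
# moves each submodule to the GPU only for its own forward pass, which is why
# peak CUDA allocation can be asserted to stay under 7 GB even though the whole
# pipeline is larger.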
| 55 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
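# Behavior note (added): _LazyModule defers the heavy torch imports declared in
# _import_structure until an attribute is first accessed, so importing this
# package stays cheap even though modeling classes are listed above.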
| 286 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
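# For context, a minimal usage sketch of the tokenizer exercised above (an
# illustration, not part of the test file). It assumes network access to the
# public `vinai/phobert-base` checkpoint.
def _phobert_usage_sketch():
    tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    tokens = tokenizer.tokenize("Tôi là sinh_viên")  # PhoBERT expects word-segmented input
    return tokenizer.convert_tokens_to_ids(tokens)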
| 286 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
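# Worked example (an illustration, not part of the original script) of the "*"
# substitution performed above: the layer index is recovered from the fairseq
# parameter name and spliced into the HuggingFace key template.
def _mapping_example():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"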
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 302 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# NOTE: the model-specific class name was garbled in this extract; it is restored
# here as the EfficientFormer image processor, which this code matches closely.
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
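# Minimal usage sketch (an illustration, not part of the original module); it
# assumes numpy is available and exercises the default 224x224 resize and
# center-crop path.
def _image_processor_usage_sketch():
    processor = EfficientFormerImageProcessor()
    image = np.zeros((256, 320, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 3, 224, 224)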
| 302 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: one point per position that matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed new children from a parent, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for `target` is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
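# Quick sanity-check helper (an illustration, not part of the original script): a
# short target over a small gene pool converges in a handful of generations.
def _tiny_demo() -> tuple[int, int, str]:
    return basic("HELLO", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ"), debug=False)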
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 356 |
'''simple docstring'''
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case: brute force on very small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
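# Optional cross-check sketch (an illustration, not part of the original module):
# on small inputs the divide-and-conquer answer should match a direct O(n^2) scan.
def _brute_force_check(points_to_check) -> bool:
    import itertools
    import math

    best_sqr = min(
        euclidean_distance_sqr(p, q) for p, q in itertools.combinations(points_to_check, 2)
    )
    return math.isclose(best_sqr**0.5, closest_pair_of_points(points_to_check, len(points_to_check)))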
| 179 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 122 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 122 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 365 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 51 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
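# Minimal usage sketch (illustration only): with a fixed seed this pipeline keeps
# the composition similar across resolutions, because the initial noise is copied
# from a shared 64x64 reference latent. The checkpoint name and device below are
# assumptions.
#
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5"
#   ).to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image_512 = pipe("a red barn", height=512, width=512, generator=generator).images[0]
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image_768 = pipe("a red barn", height=512, width=768, generator=generator).images[0]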
| 143 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
    def test_model( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_inference_no_head( self ) -> Union[str, Any]:
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 32 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCamelCase_ ( nn.Module):
"""simple docstring"""
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ) -> str:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ) -> str:
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
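# Hedged sketch of the mixing rule implemented in `forward` above (toy shapes; not part of
# the original module): each transformer's residual (encoded - input) is blended with
# `mix_ratio`, then the input is added back.
#   input_states = torch.randn(1, 4, 8)
#   residual_a, residual_b = torch.randn(1, 4, 8), torch.randn(1, 4, 8)  # encoded - input, per transformer
#   output = residual_a * 0.5 + residual_b * (1 - 0.5) + input_states    # mix_ratio = 0.5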
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_biogpt'''] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
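# Illustrative note (not part of the original file): with the lazy module installed above,
# `from transformers.models.biogpt import BioGptModel` defers the torch-heavy modeling
# import until the attribute is first accessed, while the TYPE_CHECKING branch keeps
# static analyzers and IDEs aware of the real symbols.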
| 195 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> Tuple:
        """simple docstring"""
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
    def prepare_image_processor_dict( self ) -> int:
        """simple docstring"""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ) -> str:
        """simple docstring"""
        image_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(image_url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.image_processor_tester = PixaStructImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[int]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def test_expected_patches( self ) -> Any:
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image , return_tensors='''pt''' , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
    def test_call_pil( self ) -> Union[str, Any]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self ) -> Tuple:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            dummy_text = '''Hello'''
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self ) -> Union[str, Any]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self ) -> Any:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ) -> Dict:
        """simple docstring"""
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> str:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def test_call_pil( self ) -> List[str]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
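# Illustrative arithmetic for `expected_hidden_dim` used throughout these tests: each
# flattened patch carries patch_h * patch_w * channels values plus 2 row/column index
# entries, e.g. 16 * 16 * 3 + 2 == 770 for the default 3-channel setup, and the same
# 770 here since the 4th (alpha) channel is dropped by RGB conversion: 16 * 16 * (4 - 1) + 2.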
| 125 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> Optional[Any]:
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''train''' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['''labels'''].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''val''' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 6 | 0 |
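# Hedged CLI sketch (paths are illustrative): fire maps positional and keyword args onto
# save_len_file(tokenizer_name, data_dir, ...), e.g.
#   python save_len_file.py t5-small /path/to/data_dir --max_source_length 1024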
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCAmelCase: Optional[int] = logging.get_logger(__name__)
class PerceiverFeatureExtractor ( PerceiverImageProcessor ):
"""simple docstring"""
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 353 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def quote_of_the_day( ):
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes( ):
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
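    # Illustrative note (response shape assumed from the public ZenQuotes API docs): both
    # endpoints return a JSON list of objects like {"q": quote, "a": author, "h": html},
    # so e.g. response[0]["q"] picks out the quote text.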
| 336 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
_snake_case = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
_snake_case = '▁'
# Segments (not really needed)
_snake_case = 0
_snake_case = 1
_snake_case = 2
_snake_case = 3
_snake_case = 4
class XLNetTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ) -> Any:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _lowercase ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def _lowercase ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def _lowercase ( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
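# Hedged sketch of the special-token layout produced by the two helpers above
# (token ids are made up for illustration):
#   ids_a, ids_b, sep, cls = [10, 11], [20], [5], [3]
#   ids_a + sep + cls                                        -> single sequence: A <sep> <cls>
#   ids_a + sep + ids_b + sep + cls                          -> pair: A <sep> B <sep> <cls>
#   len(ids_a + sep) * [0] + len(ids_b + sep) * [1] + [2]    -> token type ids; the CLS segment is 2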
| 294 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase__ = 'base_with_context'
def load_notes_encoder( weights, model ) -> int:
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights, model ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
        attention_weights = ly_weight["attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights, model ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ) -> int:
    '''simple docstring'''
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint )
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
UpperCamelCase__ = parser.parse_args()
main(args)
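# Hedged CLI sketch (the script filename is illustrative; `--checkpoint_path` defaults to
# f"{MODEL}/checkpoint_500000" per the argparse setup above):
#   python convert_music_spectrogram_to_diffusers.py --output_path ./converted --save True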
| 65 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
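# Hedged CLI sketch (file names and the script name are illustrative):
#   python convert_sew_checkpoint.py --checkpoint_path sew.pt --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf --is_finetuned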
| 362 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """owlvit_text_model"""
    def __init__( self , vocab_size=49_408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49_406 , eos_token_id=49_407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """owlvit_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """owlvit"""
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
    def outputs( self ):
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
    def atol_for_validation( self ):
return 1e-4
    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ):
return 14
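# Hedged usage sketch (not part of the original module; hyper-parameter values are
# illustrative): compose a full config from sub-config dicts via the helper above.
#   text_cfg = OwlViTTextConfig(vocab_size=49408).to_dict()
#   vision_cfg = OwlViTVisionConfig(patch_size=32).to_dict()
#   config = OwlViTConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=512)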
| 194 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_snake_case : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
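# Hedged CLI sketch (file names and the script name are illustrative):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path model.ckpt \
#       --config_file config.json --pytorch_dump_path pytorch_model.bin --base_model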
| 123 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_snake_case : Dict = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a (TestCasePlus ):
"""simple docstring"""
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ) -> Union[str, Any]:
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ) -> int:
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ) -> Dict:
        self.run_and_check(
            stage=stage , model=model , distributed=False , fpaa=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ) -> str:
        self.run_and_check(
            stage=stage , model=model , distributed=True , fpaa=True , )
    def do_checks( self , output_dir ) -> Union[str, Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fpaa: bool = True , quality_checks: bool = True , ) -> Tuple:
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fpaa=fpaa , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage: str , model_name: str , eval_steps: int = 10 , num_train_epochs: int = 1 , distributed: bool = True , fpaa: bool = True , ) -> Tuple:
        output_dir = self.get_auto_remove_tmp_dir("./xxx" , after=True )
        args = F'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
def __snake_case ( self : str , lowerCamelCase : str=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__snake_case : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
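# Sketch of how these slow GPU tests are typically launched (the test-file path is an assumption):
#   RUN_SLOW=1 pytest tests/deepspeed -k wav2vec2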
| 123 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
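# Typical invocation through accelerate's CLI entry point (config path is illustrative):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml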
| 364 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
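# Usage sketch for the tokenizer above (checkpoint name taken from the maps at the top of the file):
# tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# tokenizer("hello world")["input_ids"]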
| 19 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve for the one of resistance, reactance, impedance that is given as 0 (|Z|^2 = R^2 + X^2)."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
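# Example: a branch with R = 3 ohm and X = 4 ohm has |Z| = sqrt(3**2 + 4**2) = 5 ohm:
# electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}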
| 92 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 92 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
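# Example invocation (the script filename and all paths are placeholders for illustration):
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./sew_asr.pt --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf --is_finetuned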
| 362 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
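# Usage sketch for the processor above (the checkpoint name is illustrative):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt")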
| 241 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids) | 286 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
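# Usage sketch for the Fenwick tree above (prefix sums in O(log n)):
# f = FenwickTree([1, 2, 3, 4, 5])
# f.prefix(3)                    -> 6   (1 + 2 + 3)
# f.add(2, 10); f.query(0, 5)    -> 25  (point update followed by a range sum)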
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

        comm_check_on_output(result)

        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_maskformer_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
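# Sketch: turning raw MaskFormer outputs into a semantic map with the same image processor
# (post_process_semantic_segmentation is part of MaskFormerImageProcessor; `image` is a PIL image):
# seg_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]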
| 367 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
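# Quick check of the rope_scaling validation above:
# LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # ok
# LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})   # raises ValueError (factor must be > 1)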
| 341 | 0 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can form a polygon (longest side < sum of the others)."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
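# Example: sides (3, 4, 5) can form a polygon, while (1, 1, 3) cannot (3 >= 1 + 1):
# check_polygon([3, 4, 5])  -> True
# check_polygon([1, 1, 3])  -> False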
| 168 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.dummy_cond_unet_upscale
__A : List[str] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[int] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Any = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Any = '''A painting of a squirrel eating a burger'''
__A : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_cond_unet_upscale
__A : int = DDPMScheduler()
__A : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[Any] = self.dummy_vae
__A : List[str] = self.dummy_text_encoder
__A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__A : Union[str, Any] = unet.half()
__A : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__A : Optional[Any] = torch.manual_seed(0 )
__A : Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
__A : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__A : str = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Union[str, Any] = '''a cat sitting on a park bench'''
__A : Union[str, Any] = torch.manual_seed(0 )
__A : Optional[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__A : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Dict = '''a cat sitting on a park bench'''
__A : Any = torch.manual_seed(0 )
__A : Optional[int] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase__( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Dict = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__A : Tuple = '''a cat sitting on a park bench'''
__A : Tuple = torch.manual_seed(0 )
__A : List[str] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type='''np''' , )
__A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
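# Hedged end-to-end sketch of the x4 upscaler exercised by the slow tests
# above, using the same public checkpoint; the input image is a placeholder.
# Not invoked here: it needs a GPU and network access.
def upscale_demo():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        'stabilityai/stable-diffusion-x4-upscaler', torch_dtype=torch.float16
    ).to('cuda')
    low_res = Image.new('RGB', (128, 128))  # placeholder 128x128 input
    out = pipe(prompt='a cat sitting on a park bench', image=low_res).images[0]
    assert out.size == (512, 512)  # output side is 4x the input side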
| 179 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
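# Tiny standalone stand-in for the `_LazyModule` behavior above: attribute
# access triggers the submodule import only on first use. `LazySketch` and
# its mapping logic are illustrative, not the real implementation.
import importlib
import types

class LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value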
| 363 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__snake_case = data_utils.TransfoXLTokenizer
__snake_case = data_utils.TransfoXLCorpus
__snake_case = data_utils
__snake_case = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , "rb" ) as fp:
            corpus = pickle.load(fp , encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__snake_case = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
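# Hedged direct-call sketch of the converter above, bypassing argparse; both
# paths are placeholders for a real output folder and a pre-processed corpus
# pickle (wrapped in a function so nothing runs at import time).
def convert_demo():
    convert_transfo_xl_checkpoint_to_pytorch(
        tf_checkpoint_path="",                     # skip the TF branch
        transfo_xl_config_file="",
        pytorch_dump_folder_path="./transfo-xl-dump",
        transfo_xl_dataset_file="./corpus.pkl",    # placeholder corpus file
    )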
| 112 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_UpperCAmelCase : Optional[int] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__( self : Optional[int] , UpperCAmelCase : int = 256 , UpperCAmelCase : int = 256 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 20.0 , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[str] , ) -> Optional[Any]:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase__ : Dict = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : List[Any] = backbone_config.pop('model_type' )
lowerCamelCase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[int] = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase__ : Any = DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase__ : List[str] = (
decoder_config.pop('model_type' ) if isinstance(_snake_case , _snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : Any = CONFIG_MAPPING[decoder_type]
lowerCamelCase__ : int = config_class.from_dict(_snake_case )
lowerCamelCase__ : List[Any] = backbone_config
lowerCamelCase__ : List[str] = decoder_config
# main feature dimension for the model
lowerCamelCase__ : List[str] = fpn_feature_size
lowerCamelCase__ : Dict = mask_feature_size
# initializer
lowerCamelCase__ : List[str] = init_std
lowerCamelCase__ : Optional[Any] = init_xavier_std
# Hungarian matcher && loss
lowerCamelCase__ : Tuple = cross_entropy_weight
lowerCamelCase__ : Dict = dice_weight
lowerCamelCase__ : Optional[Any] = mask_weight
lowerCamelCase__ : str = use_auxiliary_loss
lowerCamelCase__ : int = no_object_weight
lowerCamelCase__ : Tuple = output_auxiliary_logits
lowerCamelCase__ : Optional[int] = self.decoder_config.encoder_attention_heads
lowerCamelCase__ : Tuple = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def A_ ( cls : List[Any] , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : PretrainedConfig , **UpperCAmelCase : Any ) -> str:
return cls(
backbone_config=_snake_case , decoder_config=_snake_case , **_snake_case , )
def A_ ( self : Dict ) -> Optional[Any]:
lowerCamelCase__ : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : List[Any] = self.backbone_config.to_dict()
lowerCamelCase__ : Tuple = self.decoder_config.to_dict()
lowerCamelCase__ : Tuple = self.__class__.model_type
return output
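# Hedged sketch of the default construction path above, via the released
# transformers class this snippet mirrors: with no backbone_config or
# decoder_config passed, the Swin backbone and DETR decoder fallbacks are built.
def maskformer_config_demo():
    from transformers import MaskFormerConfig
    config = MaskFormerConfig()
    print(config.backbone_config.model_type)  # 'swin'
    print(config.decoder_config.model_type)   # 'detr'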
| 50 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode : bool , use_xla : bool ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size : int , sequence_length : int , vocab_size : int ) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __snake_case ( a ):
UpperCAmelCase__ : TensorFlowBenchmarkArguments
UpperCAmelCase__ : PretrainedConfig
UpperCAmelCase__ : str = "TensorFlow"
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return tf.__version__
def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_inference)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_train)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_inference)
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_train)
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(_snake_case , training=_snake_case)
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
timeit.repeat(_snake_case , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
_snake_case , repeat=self.args.repeat , number=10 , )
return min(_snake_case) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
UpperCAmelCase_ = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
UpperCAmelCase_ = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(_snake_case)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(_snake_case)
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
return "N/A", None
| 51 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__snake_case : List[Any] = None
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Any = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
__snake_case : Tuple = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
__snake_case : Optional[Any] = '''▁'''
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ['input_ids', 'token_type_ids']
__lowercase : int = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
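# Illustrative sketch of the special-token layout the two helpers above
# produce for a sentence pair: cls + A + sep + B + sep, with token_type_ids
# 0 over the first segment and 1 over the second. The ids are placeholders.
cls_id, sep_id = 101, 102
a_ids, b_ids = [7, 8], [9]
input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert len(input_ids) == len(token_type_ids) == 6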
| 363 | '''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(F'''Generating {path}''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _ , src_lang , tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | 0 |
"""simple docstring"""
from math import factorial
def UpperCAmelCase__ (snake_case__ : int = 1_00 ):
    """simple docstring"""
    return sum(map(int , str(factorial(snake_case__ ) ) ) )
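# Sanity check against a small case: 10! = 3628800, and its digit sum is
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert sum(map(int, str(factorial(10)))) == 27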
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 64 |
from manim import *
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('CPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('GPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
gpu.move_to([-1, -1, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Model' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
model.move_to([3, -1.0, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
rect.set_stroke(snake_case )
lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case , buff=0.0 )
self.add(snake_case )
model_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case , *snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Loaded Checkpoint' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = fill.copy().set_fill(snake_case , opacity=0.7 )
target.move_to(snake_case )
ckpt_arr.append(snake_case )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case , snake_case )
lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case )
lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Disk' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case , run_time=3 ) , Write(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) )
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case , run_time=1.5 ) )
self.play(*snake_case )
self.play(FadeOut(snake_case ) )
lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case , run_time=3 ) )
self.play(
FadeOut(snake_case , snake_case , *snake_case , *snake_case ) , )
self.wait()
| 195 | 0 |
from scipy.stats import pearsonr
import datasets
lowerCAmelCase__ : Union[str, Any] ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowerCAmelCase__ : Optional[Any] ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowerCAmelCase__ : Optional[Any] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
if return_pvalue:
SCREAMING_SNAKE_CASE_ : Dict = pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )[0] )}
| 365 |
import socket
def a__ ( ):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    with open('Received_file', 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
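# Hedged counterpart server sketch for the client above: accept one
# connection, read the greeting, then stream a file back in 1024-byte chunks.
# `serve_file` and the file path are illustrative, not from the original.
def serve_file(path='some_file.bin', port=1_2_3_1_2):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), port))
    srv.listen(1)
    conn, _ = srv.accept()
    print(conn.recv(1024))  # b'Hello server!'
    with open(path, 'rb') as f:
        while chunk := f.read(1024):
            conn.send(chunk)
    conn.close()
    srv.close()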
| 162 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : int = logging.get_logger(__name__)
@dataclass
class lowercase__ ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
A_ : List[str] = field(
default=lowerCamelCase__ , metadata={"""help""": """Name of TPU"""} , )
A_ : Dict = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
A_ : List[str] = field(default=lowerCamelCase__ , metadata={"""help""": """Benchmark models in eager model."""} )
A_ : List[Any] = field(
default=lowerCamelCase__ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
_SCREAMING_SNAKE_CASE : Any = None
if self.tpu:
try:
if self.tpu_name:
_SCREAMING_SNAKE_CASE : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return tpu
@cached_property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_SCREAMING_SNAKE_CASE : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def UpperCAmelCase_ ( self ):
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCAmelCase_ ( self ):
return self.n_gpu > 0
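# Standalone sketch of the `no_*` flag flipping done in `__init__` above:
# "no_cuda=True" becomes "cuda=False". The kwargs here are illustrative.
deprecated = ['no_inference', 'no_cuda', 'no_tpu']
kwargs = {'no_cuda': True, 'tpu_name': 'my-tpu'}
for arg in list(kwargs):
    if arg in deprecated:
        kwargs[arg[3:]] = not kwargs.pop(arg)  # strip the 'no_' prefix, negate
print(kwargs)  # {'tpu_name': 'my-tpu', 'cuda': False}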
| 200 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully set up pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
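# A minimal standalone sketch (not part of the original module) of how the
# launcher above assembles the final gcloud call: the startup commands are
# joined with "; " into a single --command string. The TPU name and zone
# below are hypothetical placeholders.
import subprocess

startup_commands = ["cd /usr/share", "pip install accelerate", "accelerate launch train.py"]
joined_command = "; ".join(startup_commands)
gcloud_cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
    "--zone", "us-central1-a", "--command", joined_command, "--worker", "all",
]
print(" ".join(gcloud_cmd))  # dry run, mirroring the --debug branch above
# subprocess.run(gcloud_cmd)  # would actually SSH into the pod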
| 336 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
_UpperCAmelCase : str = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 6_5536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 6_5536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 13_1072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
}
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return torch.atana(UpperCamelCase__ , UpperCamelCase__ ) / math.pi * 2
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(UpperCamelCase__ , UpperCamelCase__ )
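# Numeric sanity check for the schedule above (an illustrative sketch;
# ``torch.atana`` in this dump is assumed to stand for ``torch.atan2``): with
# sigma = sin(t * pi / 2) ** 2 and alpha = sqrt(1 - sigma ** 2), the warped
# timestep atan2(sigma, alpha) / pi * 2 fixes the endpoints t = 0 and t = 1.
import math
import torch

_t = torch.tensor([0.0, 0.5, 1.0])
_sigma = torch.sin(_t * math.pi / 2) ** 2
_alpha = (1 - _sigma**2) ** 0.5
print(torch.atan2(_sigma, _alpha) / math.pi * 2)  # tensor([0.0000, 0.3333, 1.0000])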
class lowercase ( lowercase_ ):
pass
class lowercase ( nn.Module ):
def __init__( self , snake_case ):
super().__init__()
snake_case_ = DiffusionAttnUnetaD(snake_case , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=snake_case )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = MODELS_MAP[model_name]['url']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
_UpperCAmelCase : Optional[Any] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
_UpperCAmelCase : int = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
_UpperCAmelCase : Dict = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
_UpperCAmelCase : List[str] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
_UpperCAmelCase : str = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
_UpperCAmelCase : Dict = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(UpperCamelCase__ ) and not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return name.replace(UpperCamelCase__ , UpperCamelCase__ )
elif name.startswith(UpperCamelCase__ ):
return [name.replace(UpperCamelCase__ , UpperCamelCase__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=13 ):
'''simple docstring'''
snake_case_ = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
snake_case_ = 0
if string.startswith('net.3.' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('net.' ):
snake_case_ = string[4:]
while string.startswith('main.7.' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('main.' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = 'mid_block'
elif depth > 0 and int(UpperCamelCase__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(UpperCamelCase__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(UpperCamelCase__ ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(UpperCamelCase__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(UpperCamelCase__ )
snake_case_ = new_string_left
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case_ = prefix + '.' + new_layer + '.' + string_left
else:
snake_case_ = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
# up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(UpperCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case_ = transform_conv_attns(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
snake_case_ = v
return new_state_dict
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if len(UpperCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
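# Illustrative shapes for the two cases handled by the transform above
# (hypothetical sizes): a single projection stored as a Conv1d kernel of
# shape [out, in, 1] is squeezed to a Linear weight [out, in], and a fused
# qkv kernel of shape [3 * d, in, 1] is split into three [d, in] matrices.
import torch

_conv_weight = torch.randn(8, 4, 1)  # Conv1d kernel with kernel_size == 1
_linear_weight = _conv_weight[:, :, 0]  # -> shape [8, 4]
_qkv = torch.randn(3 * 8, 4, 1)  # fused query/key/value kernel
_single = _qkv.shape[0] // 3
_q, _k, _v = (_qkv[i * _single : (i + 1) * _single, :, 0] for i in range(3))
print(_linear_weight.shape, _q.shape, _k.shape, _v.shape)  # all torch.Size([8, 4])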
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
snake_case_ = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(UpperCamelCase__ )
snake_case_ = MODELS_MAP[model_name]['sample_rate']
snake_case_ = MODELS_MAP[model_name]['sample_size']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=UpperCamelCase__ , sample_rate=UpperCamelCase__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(UpperCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase__ )['state_dict'] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(UpperCamelCase__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(UpperCamelCase__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('kernel' ) for k in list(UpperCamelCase__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(UpperCamelCase__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=UpperCamelCase__ )
snake_case_ = torch.manual_seed(UpperCamelCase__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase__ ).to(UpperCamelCase__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase__ )[:-1]
snake_case_ = get_crash_schedule(UpperCamelCase__ )
snake_case_ = DanceDiffusionPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=UpperCamelCase__ , generator=UpperCamelCase__ ).audios
snake_case_ = sampling.iplms_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , UpperCamelCase__ )
print('Diff max' , UpperCamelCase__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_UpperCAmelCase : List[Any] = parser.parse_args()
main(args)
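# Example invocation (the script filename is assumed; the checkpoint name must
# be one of the official MODELS_MAP keys above, or a path to a local .ckpt):
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers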
| 200 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowercase :
def __init__( self , snake_case , snake_case=99 , snake_case=13 , snake_case=7 , snake_case=9 , snake_case=True , snake_case=True , snake_case=False , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=8 , snake_case=0.1 , snake_case=0.0_02 , snake_case=1 , snake_case=0 , snake_case=0 , snake_case=None , snake_case=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = encoder_seq_length
snake_case_ = decoder_seq_length
# For common tests
snake_case_ = self.decoder_seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = d_ff
snake_case_ = relative_attention_num_buckets
snake_case_ = dropout_rate
snake_case_ = initializer_factor
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = decoder_start_token_id
snake_case_ = None
snake_case_ = decoder_layers
def a ( self ):
return TaConfig.from_pretrained('google/umt5-base' )
def a ( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
if attention_mask is None:
snake_case_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case )
if decoder_head_mask is None:
snake_case_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case )
if cross_attn_head_mask is None:
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
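# For example (hypothetical values): with pad_token_id = 0 and
# input_ids = [[5, 7, 9, 0, 0]], ``input_ids.ne(0)`` yields the attention mask
# [[True, True, True, False, False]], while the head masks above default to
# all-ones tensors of shape (num_layers, num_heads).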
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# We need to clamp the input ids here to avoid having pad tokens in between:
# for NllbMoe the position_ids are prepared such that all pad tokens get
# pos id = 2 and the rest lie in 2..seq_length, where seq_length here is
# seq_length - num_pad_tokens. When using past, there is no way of knowing
# whether the past input ids contained pad tokens, which results in an
# incorrect seq_length and, in turn, position_ids that are off by
# num_pad_tokens in the past input.
snake_case_ = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = self.get_config()
snake_case_ = config.num_attention_heads
snake_case_ = self.prepare_inputs_dict(snake_case , snake_case , snake_case )
return config, input_dict
def a ( self ):
snake_case_ , snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(
input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , )
snake_case_ = model(input_ids=snake_case , decoder_input_ids=snake_case )
snake_case_ = result.last_hidden_state
snake_case_ = result.past_key_values
snake_case_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
# first forward pass
snake_case_ = model(snake_case , use_cache=snake_case )
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , use_cache=snake_case )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 )
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = model(snake_case )['last_hidden_state']
snake_case_ = model(snake_case , past_key_values=snake_case )['last_hidden_state']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def a ( self , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).to(snake_case ).half().eval()
snake_case_ = model(**snake_case )['last_hidden_state']
self.parent.assertFalse(torch.isnan(snake_case ).any().item() )
@require_torch
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[int] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__SCREAMING_SNAKE_CASE : List[str] = [0.8, 0.9]
def a ( self ):
snake_case_ = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = UMTaModel(config_and_inputs[0] ).to(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case )
def a ( self ):
snake_case_ = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = config_and_inputs[0]
snake_case_ = UMTaForConditionalGeneration(snake_case ).eval()
model.to(snake_case )
snake_case_ = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
}
for attn_name, (name, mask) in zip(snake_case , head_masking.items() ):
snake_case_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case )
snake_case_ = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def a ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def a ( self ):
snake_case_ = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case )
snake_case_ = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case )
snake_case_ = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
snake_case_ = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids
# fmt: off
snake_case_ = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case , snake_case )
snake_case_ = model.generate(input_ids.to(snake_case ) )
snake_case_ = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
snake_case_ = tokenizer.batch_decode(snake_case )
self.assertEqual(snake_case , snake_case )
| 200 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = TextaTextGenerationPipeline(model=lowercase , tokenizer=lowercase )
return generator, ["Something to write", "Something else"]
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : int = generator('Something there' )
self.assertEqual(lowercase , [{'generated_text': ANY(lowercase )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_lowerCamelCase : Optional[Any] = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
] , )
_lowerCamelCase : Union[str, Any] = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
] , )
with self.assertRaises(lowercase ):
generator(4 )
@require_torch
def A_ ( self ):
_lowerCamelCase : int = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_lowerCamelCase : Dict = generator('Something there' , do_sample=lowercase )
self.assertEqual(lowercase , [{'generated_text': ''}] )
_lowerCamelCase : str = 3
_lowerCamelCase : str = generator(
'Something there' , num_return_sequences=lowercase , num_beams=lowercase , )
_lowerCamelCase : int = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(lowercase , lowercase )
_lowerCamelCase : int = generator('This is a test' , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase )
self.assertEqual(
lowercase , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_lowerCamelCase : Optional[int] = generator.model.config.eos_token_id
_lowerCamelCase : Union[str, Any] = '<pad>'
_lowerCamelCase : List[str] = generator(
['This is a test', 'This is a second test'] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def A_ ( self ):
_lowerCamelCase : List[str] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_lowerCamelCase : List[str] = generator('Something there' , do_sample=lowercase )
self.assertEqual(lowercase , [{'generated_text': ''}] ) | 96 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = 9
_UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCamelCase = kruskal(__snake_case, __snake_case )
_UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(__snake_case ) == sorted(__snake_case )
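# A compact standalone restatement of the fixture above: each edge is a triple
# [u, v, weight], and Kruskal greedily keeps the cheapest edges that do not
# close a cycle, ending with |V| - 1 = 8 edges for the 9 vertices here.
expected_mst = [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]
print(sum(w for _, _, w in expected_mst))  # total MST weight: 37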
| 194 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = random()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __repr__( self):
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1)
def __str__( self):
__SCREAMING_SNAKE_CASE = str(self.value) + """ """
__SCREAMING_SNAKE_CASE = str(self.left or """""")
__SCREAMING_SNAKE_CASE = str(self.right or """""")
return value + left + right
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split(root.left , UpperCamelCase_ )
return left, root
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split(root.right , UpperCamelCase_ )
return root, right
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__SCREAMING_SNAKE_CASE = merge(left.right , UpperCamelCase_ )
return left
else:
__SCREAMING_SNAKE_CASE = merge(UpperCamelCase_ , right.left )
return right
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = Node(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split(UpperCamelCase_ , UpperCamelCase_ )
return merge(merge(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split(UpperCamelCase_ , value - 1 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split(UpperCamelCase_ , UpperCamelCase_ )
return merge(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
for arg in args.split():
if arg[0] == "+":
__SCREAMING_SNAKE_CASE = insert(UpperCamelCase_ , int(arg[1:] ) )
elif arg[0] == "-":
__SCREAMING_SNAKE_CASE = erase(UpperCamelCase_ , int(arg[1:] ) )
else:
print("""Unknown command""" )
return root
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. 'q' to quit. """ )
__SCREAMING_SNAKE_CASE = input()
while args != "q":
__SCREAMING_SNAKE_CASE = interact_treap(UpperCamelCase_ , UpperCamelCase_ )
print(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = input()
print("""good by!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
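# A short non-interactive usage sketch of the treap API above: inserts keep
# values ordered by key and heap-ordered by random priority, so an inorder
# traversal prints the values in sorted order.
#
#     root = None
#     for x in (5, 3, 8, 1):
#         root = insert(root, x)
#     inorder(root)         # prints 1,3,5,8,
#     root = erase(root, 3)
#     inorder(root)         # prints 1,5,8,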
| 255 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
__SCREAMING_SNAKE_CASE = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__SCREAMING_SNAKE_CASE = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase_ , 1 ):
if n < _p:
# then we have our last prime to check
__SCREAMING_SNAKE_CASE = primes[:idx]
break
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# a remaining odd component (d)
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__SCREAMING_SNAKE_CASE = False
for r in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = pow(UpperCamelCase_ , d * 2**r , UpperCamelCase_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__SCREAMING_SNAKE_CASE = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
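# Worked example of the decomposition above: for n = 97, n - 1 = 96 = 2**5 * 3,
# so d = 3 and s = 5. For witness 2 the successive squarings hit n - 1, so the
# witness accepts and 97 passes the round.
_n, _d, _s = 97, 3, 5
print([pow(2, _d * 2**r, _n) for r in range(_s)])  # [8, 64, 22, 96, 1]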
def _lowerCAmelCase ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 255 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
_lowerCAmelCase = field(
default=snake_case_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(snake_case_ )} )
_lowerCAmelCase = field(
default=snake_case_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
_lowerCAmelCase = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCAmelCase = field(
default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
_lowerCAmelCase = field(
default=6_4 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
_lowerCAmelCase = field(
default=3_0 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
_lowerCAmelCase = field(
default=snake_case_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
_lowerCAmelCase = field(
default=snake_case_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
_lowerCAmelCase = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
_lowerCAmelCase = field(
default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
_lowerCAmelCase = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
_lowerCAmelCase = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class a ( snake_case_ ):
_lowerCAmelCase = """train"""
_lowerCAmelCase = """dev"""
class a ( snake_case_ ):
_lowerCAmelCase = 4_2
_lowerCAmelCase = 4_2
_lowerCAmelCase = 4_2
_lowerCAmelCase = 4_2
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = Split.train , __magic_name__ = False , __magic_name__ = None , __magic_name__ = "pt" , ) -> List[str]:
_a = args
_a = is_language_sensitive
_a = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__magic_name__ , __magic_name__ ):
try:
_a = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
_a = mode
# Load data features from cache or dataset file
_a = 'v2' if args.version_2_with_negative else 'v1'
_a = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + '.lock'
with FileLock(__magic_name__ ):
if os.path.exists(__magic_name__ ) and not args.overwrite_cache:
_a = time.time()
_a = torch.load(__magic_name__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_a = self.old_features['features']
_a = self.old_features.get('dataset' , __magic_name__ )
_a = self.old_features.get('examples' , __magic_name__ )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
' future run' )
else:
if mode == Split.dev:
_a = self.processor.get_dev_examples(args.data_dir )
else:
_a = self.processor.get_train_examples(args.data_dir )
_a , _a = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__magic_name__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__magic_name__ , )
_a = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , __magic_name__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , __magic_name__ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
_a = self.features[i]
_a = torch.tensor(feature.input_ids , dtype=torch.long )
_a = torch.tensor(feature.attention_mask , dtype=torch.long )
_a = torch.tensor(feature.token_type_ids , dtype=torch.long )
_a = torch.tensor(feature.cls_index , dtype=torch.long )
_a = torch.tensor(feature.p_mask , dtype=torch.float )
_a = torch.tensor(feature.is_impossible , dtype=torch.float )
_a = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_a = torch.tensor(feature.start_position , dtype=torch.long )
_a = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
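# In the upstream transformers source the (obfuscated) classes above are
# SquadDataTrainingArguments and SquadDataset; under that assumption, a typical
# consumption pattern wires the dataset into a DataLoader:
#
#     from torch.utils.data import DataLoader
#     from transformers import AutoTokenizer
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = SquadDataset(args, tokenizer, mode="train")
#     batch = next(iter(DataLoader(dataset, batch_size=8)))  # dict of tensors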
| 168 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = '''ResNetConfig'''
# Base docstring
__a = '''microsoft/resnet-50'''
__a = [1, 20_48, 7, 7]
# Image classification docstring
__a = '''microsoft/resnet-50'''
__a = '''tiger cat'''
__a = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = "relu" ):
super().__init__()
lowercase : Optional[int] = nn.Convad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , bias=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self.convolution(SCREAMING_SNAKE_CASE__ )
lowercase : int = self.normalization(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Optional[Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowercase : List[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowercase : List[Any] = config.num_channels
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase : Optional[int] = self.embedder(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = self.pooler(SCREAMING_SNAKE_CASE__ )
return embedding
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 ):
super().__init__()
lowercase : Dict = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = self.convolution(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.normalization(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = "relu" ):
super().__init__()
lowercase : List[Any] = in_channels != out_channels or stride != 1
lowercase : str = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
lowercase : Dict = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=SCREAMING_SNAKE_CASE__ ) , )
lowercase : Any = ACTaFN[activation]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = hidden_state
lowercase : Optional[Any] = self.layer(SCREAMING_SNAKE_CASE__ )
lowercase : str = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowercase : str = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = "relu" , SCREAMING_SNAKE_CASE__ = 4 ):
super().__init__()
lowercase : Union[str, Any] = in_channels != out_channels or stride != 1
lowercase : List[Any] = out_channels // reduction
lowercase : Tuple = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
lowercase : Optional[Any] = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , )
lowercase : Optional[int] = ACTaFN[activation]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : str = hidden_state
lowercase : Any = self.layer(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
lowercase : List[str] = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 , SCREAMING_SNAKE_CASE__ = 2 , ):
super().__init__()
lowercase : Optional[Any] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
lowercase : Dict = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = input
for layer in self.layers:
lowercase : List[str] = layer(SCREAMING_SNAKE_CASE__ )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__()
lowercase : Union[str, Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ):
self.stages.append(ResNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True ):
lowercase : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase : List[Any] = hidden_states + (hidden_state,)
lowercase : Any = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
lowercase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ , )
class __SCREAMING_SNAKE_CASE ( A__ ):
A : List[str] = ResNetConfig
A : str = 'resnet'
A : Tuple = 'pixel_values'
A : List[Any] = True
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = value
__a = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__a = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
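# A short usage sketch for the model below (standard transformers API; the
# image path is a placeholder):
#
#     from transformers import AutoImageProcessor, ResNetModel
#     from PIL import Image
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetModel.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7])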
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , A__ , )
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = config
lowercase : int = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
lowercase : int = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
lowercase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Tuple = self.embedder(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
lowercase : int = encoder_outputs[0]
lowercase : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , A__ , )
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ )
lowercase : int = config.num_labels
lowercase : Union[str, Any] = ResNetModel(SCREAMING_SNAKE_CASE__ )
# classification head
lowercase : Tuple = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
lowercase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Tuple = self.resnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase : Union[str, Any] = self.classifier(SCREAMING_SNAKE_CASE__ )
lowercase : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase : str = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase : Union[str, Any] = '''single_label_classification'''
else:
lowercase : Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase : Optional[int] = MSELoss()
if self.num_labels == 1:
lowercase : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase : str = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
lowercase : Tuple = CrossEntropyLoss()
lowercase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase : int = BCEWithLogitsLoss()
lowercase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
lowercase : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , A__ , )
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ ):
super().__init__(SCREAMING_SNAKE_CASE__ )
super()._init_backbone(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = [config.embedding_size] + config.hidden_sizes
lowercase : Optional[Any] = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = self.encoder(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
lowercase : int = outputs.hidden_states
lowercase : List[str] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowercase : Optional[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE__ , )
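# Illustrative example of the feature-map selection above (the concrete stage
# names are hypothetical): with stage_names = ("stem", "stage1", "stage2",
# "stage3", "stage4") and out_features = ["stage2", "stage4"], the loop collects
# hidden_states[2] and hidden_states[4] into feature_maps.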
| 173 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( _UpperCamelCase ) ->Tuple:
"""simple docstring"""
lowercase : List[str] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase , lowercase : str = emb.weight.shape
lowercase : Optional[int] = nn.Linear(_UpperCamelCase, _UpperCamelCase, bias=_UpperCamelCase )
lowercase : Any = emb.weight.data
return lin_layer
def __lowercase ( _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : Optional[int] = torch.load(_UpperCamelCase, map_location='''cpu''' )
lowercase : List[str] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowercase : int = mam_aaa['''model''']
remove_ignore_keys_(_UpperCamelCase )
lowercase : Any = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase : Dict = MaMaaaConfig(
vocab_size=_UpperCamelCase, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
lowercase : Union[str, Any] = state_dict['''decoder.embed_tokens.weight''']
lowercase : Dict = MaMaaaForConditionalGeneration(_UpperCamelCase )
model.model.load_state_dict(_UpperCamelCase, strict=_UpperCamelCase )
lowercase : Dict = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__a = parser.parse_args()
__a = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
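# Example invocation (the script filename and paths are illustrative; the two
# positional arguments match the argparse setup above):
#   python convert_m2m100_checkpoint.py /path/to/m2m100.pt /path/to/output_dir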
| 173 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=1_0_0 , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Optional[int]=3_0 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : str=3_7 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=[0, 1, 2, 3] , ):
"""simple docstring"""
_lowerCamelCase : Any = parent
_lowerCamelCase : Union[str, Any] = 1_0_0
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : List[Any] = image_size
_lowerCamelCase : str = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[int] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = type_sequence_label_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Optional[Any] = scope
_lowerCamelCase : Tuple = out_indices
_lowerCamelCase : Dict = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCamelCase : str = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BeitModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = BeitForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.type_sequence_label_size
_lowerCamelCase : int = BeitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Union[str, Any] = BeitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : str = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : List[Any] = BeitForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = config_and_inputs
_lowerCamelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : str = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case__ : List[Any] = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = BeitModelTester(self )
_lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__lowerCAmelCase ), BeitForMaskedImageModeling]:
continue
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Any = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(**__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCamelCase : str = False
_lowerCamelCase : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__lowerCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
_lowerCamelCase : Any = model(**__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(config=__lowerCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = BeitModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values.to(__lowerCAmelCase )
# prepare bool_masked_pos
_lowerCamelCase : List[Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(pixel_values=__lowerCAmelCase , bool_masked_pos=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = outputs.logits
# verify the logits
_lowerCamelCase : Optional[int] = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , __lowerCAmelCase )
_lowerCamelCase : List[str] = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowerCAmelCase , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : str = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
_lowerCamelCase : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , __lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
_lowerCamelCase : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = outputs.logits
# verify the logits
_lowerCamelCase : Optional[Any] = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , __lowerCAmelCase )
_lowerCamelCase : Dict = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
_lowerCamelCase : Optional[Any] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_lowerCamelCase : Union[str, Any] = model.to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BeitImageProcessor(do_resize=__lowerCAmelCase , size=6_4_0 , do_center_crop=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_lowerCamelCase : Dict = Image.open(ds[0]['''file'''] )
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
_lowerCamelCase : List[str] = outputs.logits
# verify the logits
_lowerCamelCase : str = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , __lowerCAmelCase )
_lowerCamelCase : int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
_lowerCamelCase : str = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=__lowerCAmelCase , )
else:
_lowerCamelCase : Optional[int] = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_lowerCamelCase : Any = model.to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BeitImageProcessor(do_resize=__lowerCAmelCase , size=6_4_0 , do_center_crop=__lowerCAmelCase )
_lowerCamelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_lowerCamelCase : Any = Image.open(ds[0]['''file'''] )
_lowerCamelCase : str = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
_lowerCamelCase : Dict = outputs.logits.detach().cpu()
_lowerCamelCase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(5_0_0, 3_0_0)] )
_lowerCamelCase : Tuple = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
_lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
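# The @slow integration tests above are skipped by default; in the usual
# transformers layout they can be enabled with the RUN_SLOW flag, e.g.:
#   RUN_SLOW=1 pytest tests/models/beit/test_modeling_beit.py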
| 72 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
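# With the _LazyModule shim above, `import transformers.models.mgp_str` stays
# cheap: the torch-backed classes registered above are only materialized when an
# attribute such as `MgpstrForSceneTextRecognition` is first accessed.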
| 241 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A : Optional[int] = 16
__A : Optional[Any] = 32
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
return int(x / 2**20 )
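# Sanity check for the bytes-to-MiB helper above: an input of 3 * 2**20 bytes
# returns 3.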
class __UpperCamelCase :
def __enter__( self :Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
snake_case_ : Optional[Any] = torch.cuda.memory_allocated()
return self
def __exit__( self :List[Any] ,*_UpperCamelCase :Union[str, Any] ):
gc.collect()
torch.cuda.empty_cache()
snake_case_ : int = torch.cuda.memory_allocated()
snake_case_ : Optional[int] = torch.cuda.max_memory_allocated()
snake_case_ : Dict = bamb(self.end - self.begin )
snake_case_ : List[str] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
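# Minimal usage sketch for the tracker above (commented out; `step()` stands in
# for an arbitrary workload). In the original utility the MiB deltas computed in
# __exit__ are exposed as `used` and `peaked` attributes, which the training loop
# below reads as `tracemalloc.used` / `tracemalloc.peaked`:
#
#   with TorchTracemalloc() as tracemalloc:
#       step()
#   print(tracemalloc.used, tracemalloc.peaked)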
def UpperCAmelCase ( lowerCamelCase_ :Accelerator , lowerCamelCase_ :int = 16 , lowerCamelCase_ :str = "bert-base-cased" , lowerCamelCase_ :int = 3_20 , lowerCamelCase_ :int = 1_60 , ):
'''simple docstring'''
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
snake_case_ : str = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F'''train[:{n_train}]''', """validation""": F'''validation[:{n_val}]'''} )
def tokenize_function(lowerCamelCase_ :Any ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ : Optional[Any] = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase_ :List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
snake_case_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Any ):
'''simple docstring'''
# Initialize accelerator
snake_case_ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Any = config["""lr"""]
snake_case_ : List[Any] = int(config["""num_epochs"""] )
snake_case_ : str = int(config["""seed"""] )
snake_case_ : Union[str, Any] = int(config["""batch_size"""] )
snake_case_ : Optional[Any] = args.model_name_or_path
set_seed(lowerCamelCase_ )
snake_case_ , snake_case_ : Union[str, Any] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
# Instantiate optimizer
snake_case_ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ )
if accelerator.state.deepspeed_plugin is not None:
snake_case_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case_ : Optional[int] = 1
snake_case_ : Any = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ : List[Any] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , )
else:
snake_case_ : List[str] = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# We need to keep track of how many total steps we have iterated over
snake_case_ : List[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ : List[str] = 0
# Now we train the model
snake_case_ : Tuple = {}
for epoch in range(lowerCamelCase_ , lowerCamelCase_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCamelCase_ ):
snake_case_ : Any = model(**lowerCamelCase_ )
snake_case_ : List[str] = outputs.loss
snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train: {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
snake_case_ : Optional[Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase_ , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowerCamelCase_ , default=3_20 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowerCamelCase_ , default=1_60 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase_ , default=1 , help="""Number of train epochs.""" , )
snake_case_ : Any = parser.parse_args()
snake_case_ : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 8 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( lowercase__ ):
lowercase : str = ['input_values', 'padding_mask']
def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,):
super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase )
snake_case_ : Dict = chunk_length_s
snake_case_ : str = overlap
@property
def a__ ( self :Any ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__ ( self :List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
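# Worked example for the two properties above (the values are illustrative): with
# chunk_length_s=1.0, sampling_rate=24_000 and overlap=0.01, chunk_length is
# 24_000 samples and chunk_stride is max(1, int(0.99 * 24_000)) = 23_760 samples.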
def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
snake_case_ : Tuple = True
snake_case_ : str = bool(
isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ):
snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa )
elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
snake_case_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(_UpperCamelCase ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio )
snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) )
snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
snake_case_ : Any = max(array.shape[0] for array in raw_audio )
snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) )
snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
snake_case_ : Union[str, Any] = """max_length"""
else:
snake_case_ : int = input_values
# normal padding on batch
if padded_inputs is None:
snake_case_ : Optional[int] = self.pad(
_UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,)
if padding:
snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" )
snake_case_ : Optional[int] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
snake_case_ : Dict = example[..., None]
input_values.append(example.T )
snake_case_ : List[Any] = input_values
if return_tensors is not None:
snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase )
return padded_inputs
| 8 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_ (A : List[str] ):
if "model" in orig_key:
snake_case__ : Any = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
snake_case__ : Optional[int] = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
snake_case__ : Any = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
snake_case__ : str = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
snake_case__ : Tuple = orig_key.split('.' )[0].split('_' )[-1]
snake_case__ : Optional[Any] = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
snake_case__ : int = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
snake_case__ : Any = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
snake_case__ : Any = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
snake_case__ : Tuple = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
snake_case__ : Any = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
snake_case__ : int = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
snake_case__ : List[str] = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
snake_case__ : str = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
snake_case__ : Dict = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
snake_case__ : Any = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
snake_case__ : Optional[Any] = 'yoso.' + orig_key
return orig_key
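# Worked example of the key renaming above (traced by hand, so treat it as a
# sketch):
#   "model.transformer_0.mha.W_q.weight"
#   -> "yoso.encoder.layer.0.attention.self.query.weight"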
def lowercase_ (A : Any , A : str ):
for key in orig_state_dict.copy().keys():
snake_case__ : str = orig_state_dict.pop(A )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
snake_case__ : Dict = val
snake_case__ : Union[str, Any] = orig_state_dict['cls.predictions.decoder.bias']
snake_case__ : str = torch.arange(A ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_ (A : str , A : List[str] , A : Union[str, Any] ):
snake_case__ : Any = torch.load(A , map_location='cpu' )['model_state_dict']
snake_case__ : str = YosoConfig.from_json_file(A )
snake_case__ : Any = YosoForMaskedLM(A )
snake_case__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , A )
print(model.load_state_dict(A ) )
model.eval()
model.save_pretrained(A )
print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
a_ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ :Union[str, Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
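# Example invocation (filenames are illustrative; the flags match the argparse
# setup above):
#   python convert_yoso_checkpoint.py --pytorch_model_path yoso.pt \
#       --config_file config.json --pytorch_dump_path ./yoso-hf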
| 277 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ :Tuple = logging.get_logger(__name__)
a_ :Union[str, Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """deberta-v2"""
def __init__( self : Union[str, Any], _snake_case : Dict=1_2_8_1_0_0, _snake_case : Any=1_5_3_6, _snake_case : Tuple=2_4, _snake_case : int=2_4, _snake_case : Optional[int]=6_1_4_4, _snake_case : Optional[int]="gelu", _snake_case : Optional[int]=0.1, _snake_case : List[str]=0.1, _snake_case : str=5_1_2, _snake_case : Optional[int]=0, _snake_case : Optional[int]=0.0_2, _snake_case : Dict=1e-7, _snake_case : int=False, _snake_case : Any=-1, _snake_case : List[str]=0, _snake_case : Tuple=True, _snake_case : Any=None, _snake_case : Union[str, Any]=0, _snake_case : Tuple="gelu", **_snake_case : Union[str, Any], ) ->Optional[int]:
super().__init__(**_snake_case )
snake_case__ : Dict = hidden_size
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : List[Any] = hidden_act
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : List[str] = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[int] = relative_attention
snake_case__ : Tuple = max_relative_positions
snake_case__ : Union[str, Any] = pad_token_id
snake_case__ : Optional[int] = position_biased_input
# Backwards compatibility
if type(_snake_case ) == str:
snake_case__ : int = [x.strip() for x in pos_att_type.lower().split('|' )]
snake_case__ : List[str] = pos_att_type
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : Optional[int] = kwargs.get('pooler_hidden_size', _snake_case )
snake_case__ : int = pooler_dropout
snake_case__ : str = pooler_hidden_act
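# Note on the backwards-compatibility branch above: a string such as
# pos_att_type="p2c|c2p" is split on "|" into ["p2c", "c2p"] before being stored,
# so both the list form and the legacy pipe-separated string form are accepted.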
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def lowercase_ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case__ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ : int = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase_ ( self : Dict ) ->int:
return 1_2
def lowercase_ ( self : Tuple, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional["TensorType"] = None, _snake_case : int = 3, _snake_case : int = 4_0, _snake_case : int = 4_0, _snake_case : "PreTrainedTokenizerBase" = None, ) ->Mapping[str, Any]:
snake_case__ : Union[str, Any] = super().generate_dummy_inputs(preprocessor=_snake_case, framework=_snake_case )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 277 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class SCREAMING_SNAKE_CASE__ :
def __init__( self ):
A__ = psutil.Process()
A__ = False
def UpperCamelCase ( self ):
A__ = -1
while True:
A__ = max(self.process.memory_info().rss,self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def UpperCamelCase ( self ):
A__ = True
A__ = threading.Thread(target=self.peak_monitor )
A__ = True
self.thread.start()
def UpperCamelCase ( self ):
A__ = False
self.thread.join()
return self.cpu_memory_peak
a__: Any = PeakCPUMemory()
def UpperCamelCase__( )->Union[str, Any]:
# Time
A__ = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
A__ = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
A__ = torch.cuda.memory_allocated(_A )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCamelCase__( UpperCamelCase__ : List[Any] )->str:
# Time
A__ = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
A__ = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
A__ = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
A__ = (torch.cuda.memory_allocated(_A ) - start_measures[str(_A )]) / 2**20
A__ = (torch.cuda.max_memory_allocated(_A ) - start_measures[str(_A )]) / 2**20
return measures
def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any )->Dict:
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(_A )]:.2f}MiB" )
A__ = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
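# Sketch of the intended flow (the helper names here are illustrative; the three
# module-level functions above are, in order, a start-measure, an end-measure
# taking the start dict, and a pretty-printer):
#   start = start_measure()
#   run_workload()
#   log_measures(end_measure(start), "workload")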
| 357 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
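# The invariant exercised throughout this suite: a repo layout is
# safetensors-compatible when every `.bin` weight file has a matching
# `.safetensors` counterpart (optionally carrying a variant infix such as
# `.fp16`); one missing counterpart makes the whole layout incompatible.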
def UpperCamelCase ( self ):
A__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
# pass variant but use the non-variant filenames
A__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
A__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
# pass variant but use the non-variant filenames
A__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
A__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
A__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase,variant=__lowerCamelCase ) )
| 39 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 76 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCamelCase__ : List[Any] = {
'''E''': 1_2.7_0,
'''T''': 9.0_6,
'''A''': 8.1_7,
'''O''': 7.5_1,
'''I''': 6.9_7,
'''N''': 6.7_5,
'''S''': 6.3_3,
'''H''': 6.0_9,
'''R''': 5.9_9,
'''D''': 4.2_5,
'''L''': 4.0_3,
'''C''': 2.7_8,
'''U''': 2.7_6,
'''M''': 2.4_1,
'''W''': 2.3_6,
'''F''': 2.2_3,
'''G''': 2.0_2,
'''Y''': 1.9_7,
'''P''': 1.9_3,
'''B''': 1.2_9,
'''V''': 0.9_8,
'''K''': 0.7_7,
'''J''': 0.1_5,
'''X''': 0.1_5,
'''Q''': 0.1_0,
'''Z''': 0.0_7,
}
UpperCamelCase__ : Optional[Any] = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
UpperCamelCase__ : Dict = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def lowerCAmelCase_ ( _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : int = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def lowerCAmelCase_ ( _lowerCamelCase: tuple ):
return x[0]
def lowerCAmelCase_ ( _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : Dict = get_letter_count(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = """""".join(freq_to_letter[freq] )
__SCREAMING_SNAKE_CASE : str = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_lowerCamelCase , reverse=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : List[Any] = get_frequency_order(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
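# Scoring note for the function above: the score ranges from 0 to 12, one point
# for each of the six most-common and six least-common ETAOIN letters that also
# appear at the corresponding end of the message's frequency order.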
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_lowercase : str = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_lowercase : int = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_lowercase : Optional[Any] = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_lowercase : List[Any] = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download('''wordnet''')
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            nltk.download('''punkt''')
        if NLTK_VERSION >= version.Version('''3.6.6'''):
            nltk.download('''omw-1.4''')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 86 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
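# Worked example (illustrative values): 10_000 principal at a 5% nominal annual
# rate compounded daily for 2 years is, by construction of apr_interest, the
# same as compounding a per-period rate of 0.05 / 365 over 2 * 365 periods:
#     apr_interest(10_000, 0.05, 2) == compound_interest(10_000, 0.05 / 365, 2 * 365)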
| 86 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("""KAGGLE""") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = """google.colab""" in str(sys.modules["""IPython"""].get_ipython())
try:
        mixed_precision = PrecisionType(mixed_precision.lower())
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="""TPU""")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="""fork""")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="""127.0.0.1""", master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type="""MULTI_GPU""")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="""fork""")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
            function(*args)
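# Usage sketch (all names below are placeholders, not part of the API): wrap the
# training loop in a function and hand it to the launcher from a notebook cell.
#
#     def training_loop(model, dataloader):
#         ...
#
#     notebook_launcher(training_loop, args=(model, dataloader), num_processes=2)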
def debug_launcher(function, args=(), num_processes=2):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="""127.0.0.1""", master_port="""29500""", accelerate_mixed_precision="""no""", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="""yes""", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="""fork""")
| 35 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
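# Worked example: a 10 mH inductor with a 100 nF capacitor resonates at
# 1 / (2 * pi * sqrt(0.01 * 1e-7)) ~= 5033 Hz, so
#     resonant_frequency(0.01, 1e-7)  # -> ('Resonant frequency', 5032.9...)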
| 18 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 367 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')

mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''

if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def lowercase ( self : Tuple ):
super().setUp()
_UpperCAmelCase = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_UpperCAmelCase = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
_UpperCAmelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self : Tuple , **snake_case_ : Optional[Any] ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Tuple , snake_case_ : Any ):
return (
"This is a test",
"This is a test",
)
def lowercase ( self : Tuple ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(snake_case_ ) , 9 )
def lowercase ( self : int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
_UpperCAmelCase = en_de_tokenizer(["I am a small frog"] , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_UpperCAmelCase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(snake_case_ , batch.input_ids[0] )
_UpperCAmelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(snake_case_ )
_UpperCAmelCase = [x.name for x in Path(snake_case_ ).glob("*" )]
self.assertIn("source.spm" , snake_case_ )
MarianTokenizer.from_pretrained(snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(
["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(["I am a tiny frog", "I am a small frog"] , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def lowercase ( self : Any ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
_UpperCAmelCase = "Tämä on testi"
_UpperCAmelCase = "This is a test"
_UpperCAmelCase = [7_6, 7, 2_0_4_7, 2]
_UpperCAmelCase = [6_9, 1_2, 1_1, 9_4_0, 2]
_UpperCAmelCase = tokenizer(snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
_UpperCAmelCase = tokenizer(text_target=snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
_UpperCAmelCase = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
| 156 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
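# Sketch of a hypothetical concrete command (names are illustrative): subclasses
# register an argparse sub-parser and implement run().
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")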
| 78 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class A__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = TextaTextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return generator, ["Something to write", "Something else"]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = generator("""Something there""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
A_ = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
generator(4 )
@require_torch
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ = generator("""Something there""" , do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
A_ = 3
A_ = generator(
"""Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , )
A_ = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
A_ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
A_ = generator.model.config.eos_token_id
A_ = """<pad>"""
A_ = generator(
["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
self.assertEqual(
UpperCamelCase__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ = generator("""Something there""" , do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
| 162 | 0 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Pair each code point with a random key k and store (p + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt using the stored per-character keys."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
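# Why decryption works: encrypt stores c = (p + k) * k = p*k + k**2, so the
# original code point is p = (c - k**2) / k, which is exactly what decrypt computes.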
| 46 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_a : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
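# Example invocation (the script filename and all paths are placeholders; the
# flags come from the argparse definitions above):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin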
| 46 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray: returns (start, end, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint, scanning outwards from mid."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:  # name chosen for readability; benchmarks and plots runtimes
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
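# Worked example: in [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray is
# [4, -1, 2, 1] spanning indices 3..6 with sum 6, so
#     max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)  # -> (3, 6, 6)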
| 200 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase__ ( ctypes.Structure ):
'''simple docstring'''
A_ : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor():
"""simple docstring"""
if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def show_cursor():
"""simple docstring"""
if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def hide():
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
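# Usage sketch: hide the cursor while drawing, restoring it even if the body raises.
#     with hide():
#         render_menu()  # render_menu is a placeholder for your own drawing code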
| 200 | 1 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
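# Worked example: x ≡ 1 (mod 5) and x ≡ 1 (mod 7) has the unique solution x = 1
# modulo 35, so chinese_remainder_theorem(5, 1, 7, 1) and
# chinese_remainder_theorem2(5, 1, 7, 1) both return 1.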
| 50 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : Any="train" ):
return calculate_hypothesis_value(lowerCamelCase , lowerCamelCase ) - output(
lowerCamelCase , lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
UpperCamelCase_ : List[str] = 0
for i in range(len(lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Any ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=m ):
UpperCamelCase_ : str = 0
for i in range(lowerCamelCase ):
if index == -1:
summation_value += _error(lowerCamelCase )
else:
summation_value += _error(lowerCamelCase ) * train_data[i][0][index]
return summation_value
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : List[str] = summation_of_cost_derivative(lowerCamelCase , lowerCamelCase ) / m
return cost_derivative_value
def __lowercase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCamelCase_ : Optional[int] = 0.0_0_0_0_0_2
UpperCamelCase_ : Optional[int] = 0
UpperCamelCase_ : Union[str, Any] = 0
while True:
j += 1
UpperCamelCase_ : Dict = [0, 0, 0, 0]
for i in range(0 , len(lowerCamelCase ) ):
UpperCamelCase_ : Any = get_cost_derivative(i - 1 )
UpperCamelCase_ : List[str] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCamelCase , lowerCamelCase , atol=lowerCamelCase , rtol=lowerCamelCase , ):
break
UpperCamelCase_ : Optional[Any] = temp_parameter_vector
print(('Number of iterations:', j) )
def __lowercase ( ):
for i in range(len(lowerCamelCase ) ):
print(('Actual output value:', output(lowerCamelCase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(lowerCamelCase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
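# The hypothesis being fitted is linear: h(x) = w0 + w1*x1 + w2*x2 + w3*x3, with
# the weights held in parameter_vector. Each iteration nudges every weight by
# LEARNING_RATE times the mean prediction error over train_data (weighted by the
# corresponding feature), stopping once consecutive weight vectors agree to
# within the tolerance checked by numpy.allclose.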
| 50 | 1 |
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
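# Note (context, not part of the table): a pin table like this mirrors the
# versions declared in the package's setup.py and is typically consulted at
# import time to verify that installed dependencies satisfy these constraints.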
| 255 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for a split and save them as {split}_results.json."""
    logger.info(f'''***** {split} metrics *****''')
    for key in sorted(metrics.keys()):
        logger.info(f''' {key} = {metrics[key]}''')
    save_json(metrics, os.path.join(output_dir, f'''{split}_results.json'''))
def main():
'''simple docstring'''
lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : int = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
assert hasattr(_UpperCAmelCase , _UpperCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase : Optional[int] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase : Dict = SeqaSeqDataset
# Get datasets
lowercase : int = (
dataset_class(
_UpperCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
lowercase : str = (
dataset_class(
_UpperCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase : Optional[Any] = (
dataset_class(
_UpperCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase : List[Any] = (
build_compute_metrics_fn(data_args.task , _UpperCAmelCase ) if training_args.predict_with_generate else None
)
lowercase : List[Any] = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
lowercase : List[Any] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
lowercase : Union[str, Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : List[str] = train_result.metrics
lowercase : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase : Tuple = trainer.evaluate(metric_key_prefix='val' )
lowercase : Dict = data_args.n_val
lowercase : Tuple = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
lowercase : List[Any] = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test' )
lowercase : str = test_output.metrics
lowercase : Dict = data_args.n_test
if trainer.is_world_process_zero():
lowercase : Tuple = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir )
all_metrics.update(_UpperCAmelCase )
if training_args.predict_with_generate:
lowercase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
lowercase : Tuple = lmap(str.strip , _UpperCAmelCase )
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
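# Example invocation (model name and paths are placeholders; --output_dir,
# --do_train and --do_eval come from the standard training arguments):
#     python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#         --output_dir ./output --do_train --do_eval --task summarization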
| 255 | 1 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks) )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
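# Usage sketch: spreading 12 layers over 4 devices yields 3 consecutive layers each.
#     get_device_map(12, [0, 1, 2, 3])
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}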
| 263 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
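# Example invocations once the package's console script is installed, e.g.:
#     transformers-cli env
#     transformers-cli download bert-base-uncased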
| 263 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =ort.SessionOptions()
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return options
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
SCREAMING_SNAKE_CASE_: Dict =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] ="""A red cat sitting on a park bench"""
SCREAMING_SNAKE_CASE_: int =np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_: Optional[int] =pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =output.images
SCREAMING_SNAKE_CASE_: Union[str, Any] =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
SCREAMING_SNAKE_CASE_: Optional[int] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
SCREAMING_SNAKE_CASE_: Tuple =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
SCREAMING_SNAKE_CASE_: str =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] ="""A red cat sitting on a park bench"""
SCREAMING_SNAKE_CASE_: Tuple =np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =output.images
SCREAMING_SNAKE_CASE_: Optional[Any] =images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Optional[int] =np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 173 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_UpperCAmelCase = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_UpperCAmelCase = re.compile(r"""([a-z\d])([A-Z])""")
_UpperCAmelCase = re.compile(r"""(?<!_)_(?!_)""")
_UpperCAmelCase = re.compile(r"""(_{2,})""")
_UpperCAmelCase = r"""^\w+(\.\w+)*$"""
_UpperCAmelCase = r"""<>:/\|?*"""
def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != """")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''')
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
    return f'''{filename_prefix_for_name(name)}-{split}'''


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return f'''{filepath}*'''


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
| 173 | 1 |
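Assuming the fixed helpers above, a short usage sketch of the case round trip and the shard names produced for a split (the printed values are what the regexes actually yield):

print(camelcase_to_snakecase("SquadV2Dataset"))    # squad_v2_dataset
print(snakecase_to_camelcase("squad_v2_dataset"))  # SquadV2Dataset
print(filenames_for_dataset_split(
    "/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']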
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level() -> int:
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }" )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict() -> dict:
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info() -> None:
    return set_verbosity(INFO)
def set_verbosity_warning() -> None:
    return set_verbosity(WARNING)
def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG)
def set_verbosity_error() -> None:
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return empty function."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 108 |
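A short usage sketch for the module above; `logging_utils` is a hypothetical file name for it, chosen here only so the snippet has something to import:

import logging_utils  # hypothetical module name for the file above

logging_utils.set_verbosity_debug()   # library root logger now at DEBUG
log = logging_utils.get_logger()      # defaults to the library's root name
log.debug("visible on stderr")
logging_utils.set_verbosity_error()
log.warning("suppressed: below ERROR")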
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 108 | 1 |
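To make the regex behavior above concrete, a small sanity check run against the fixed `find_backend` (the input strings are hypothetical init lines):

assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("if not is_torch_available() and not is_vision_available():") == "torch_and_vision"
assert find_backend("import torch") is None  # not a backend guard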
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    # Converting Bytes to Megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin) ) )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 8 |
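A standalone sketch of how the `TorchTracemalloc` tracker above can be used outside the training loop; it needs a CUDA device, and the matrix multiply is just a stand-in workload:

import torch

if torch.cuda.is_available():
    with TorchTracemalloc() as tracemalloc:
        x = torch.randn(1024, 1024, device="cuda")
        y = x @ x  # allocates a second ~4 MB tensor on the GPU
    print(f"used {tracemalloc.used} MB, peaked {tracemalloc.peaked} MB")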
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 8 | 1 |
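A sketch of what the two sequence helpers above produce for a sentence pair; the token ids are made up, only the special-token positions and the 0/1 segmentation matter, and loading the tokenizer requires network access:

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
ids_a, ids_b = [7, 8], [9]
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))
# [CLS] 7 8 [SEP] 9 [SEP], e.g. [101, 7, 8, 102, 9, 102]
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
# [0, 0, 0, 0, 1, 1]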
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)", )
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from", )
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 364 |
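A quick check of the format inference above, assuming "csv" and "json" are among `PipelineDataFormat.SUPPORTED_FORMATS` as they are in transformers:

assert try_infer_format_from_ext("") == "pipe"           # empty path -> stdin/stdout
assert try_infer_format_from_ext("reviews.csv") == "csv"
assert try_infer_format_from_ext("reviews.json") == "json"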
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 69 | 0 |
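A standalone sketch of the `bool_masked_pos` construction the tests above repeat: one shared mask per batch, with exactly `num_masks` ones followed by zeros. The sizes here are made up to keep the output readable:

import torch

seq_length, num_masks, batch_size = 8, 6, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape)          # torch.Size([2, 8])
print(int(bool_masked_pos[0].sum()))  # 6 masked patches per video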