Dataset columns: code (string) | code_codestyle (int64, 0-349) | style_context (string) | style_context_codestyle (int64, 0-349) | label (int64, 0 or 1)
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 105 |
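For reference, a short sanity check of the reconstructed `Polynomial` class; the method names `evaluate`, `derivative`, and `integral` are my naming assumption for the de-obfuscated methods, and the printed values follow directly from the code above.

# Quick check of the de-obfuscated Polynomial class (method names reconstructed).
p = Polynomial(2, [1, 2, 3])  # represents 3x^2 + 2x + 1
print(p)                      # 3x^2 + 2x + 1
print(p.evaluate(2))          # 3*4 + 2*2 + 1 = 17
print(p.derivative())         # 6x + 2
print(p.integral())           # 1.0x^3 + 1.0x^2 + 1.0x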
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 262 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    # Class name reconstructed from the attributes and the `visual_prompt` /
    # `conditional_pixel_values` handling below, which match transformers' CLIPSeg processor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 255 |
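A hedged usage sketch for the processor above; the tokenizer checkpoint and image path are placeholder assumptions, not part of the source.

# Assumes transformers' ViTImageProcessor and CLIPTokenizerFast (both real classes);
# the checkpoint id and image file below are placeholders for illustration only.
from PIL import Image
from transformers import CLIPTokenizerFast, ViTImageProcessor

processor = CLIPSegProcessor(
    image_processor=ViTImageProcessor(),
    tokenizer=CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32"),
)
inputs = processor(text=["a cat"], images=[Image.open("example.jpg")], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']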
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        ) | 262 | 0 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 76 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert) | 262 | 0 |
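A minimal sketch of how the converter above is invoked; the file names are placeholders, not from the source.

# Direct call (file names are hypothetical):
# convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")
# Equivalent CLI call via python-fire, which exposes convert() as a command:
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin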
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20,
                 embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0,
                 time_embedding_act_fn: str = "silu", norm_in_type: Optional[str] = None,
                 embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear",
                 added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None,
                 embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim,
                                      dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None,
                attention_mask=None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 25 |
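A hedged smoke test for the reconstructed prior above, using deliberately tiny dimensions; all sizes here are arbitrary choices for illustration, not values from the source.

import torch

# Tiny configuration: inner_dim = 2 heads * 8 dims = 16; the sequence is
# num_embeddings + additional_embeddings = 8 tokens (4 encoder tokens +
# proj + time + hidden + prd).
model = PriorTransformer(
    num_attention_heads=2, attention_head_dim=8, num_layers=2,
    embedding_dim=32, num_embeddings=4, additional_embeddings=4,
)
out = model(
    hidden_states=torch.randn(1, 32),             # noisy image embedding
    timestep=1,
    proj_embedding=torch.randn(1, 32),            # pooled conditioning embedding
    encoder_hidden_states=torch.randn(1, 4, 32),  # per-token conditioning states
)
print(out.predicted_image_embedding.shape)  # torch.Size([1, 32])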
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 262 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 29 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
                 qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512,
                 decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75,
                 norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss | 262 | 0 |
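A one-line sanity check of the de-obfuscated config; the defaults above mirror the public ViT-MAE base configuration.

config = ViTMAEConfig(mask_ratio=0.5)  # override one field, keep the rest
print(config.hidden_size, config.patch_size, config.mask_ratio)  # 768 16 0.5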
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        # Pick, per sample, the decoding head with the highest confidence score.
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 24 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""") | 262 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!" | 262 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 262 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase__ = """examples/"""
lowerCAmelCase__ = {
"""examples""": (re.compile(r'''^check_min_version\(\"[^\"]+\"\)\s*$''', re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r'''^__version__\s+=\s+\"([^\"]+)\"\s*$''', re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r'''^(\s*)version\s*=\s*\"[^\"]+\",''', re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r'''^(\s*)release\s*=\s*\"[^\"]+\"$''', re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCAmelCase__ = """README.md"""
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version: str):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 108 |
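To illustrate the `init` pattern above, here is a small hedged demo on a hypothetical version line (the input string is invented for illustration):

re_pattern, replacement = REPLACE_PATTERNS["init"]
sample = '__version__ = "4.31.0.dev0"'  # hypothetical input line
print(re_pattern.sub(replacement.replace("VERSION", "4.99.0"), sample))
# -> __version__ = "4.99.0"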
def matching_min_vertex_cover(graph: dict) -> set:
    """APX approximation of a minimum vertex cover based on a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 262 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    # remove keys of the auxiliary segmentation head, which has no HF counterpart
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task the MobileViTV2 model you'd like to convert was trained on. "
            """
            Classification (ImageNet-1k)
                - MobileViTV2 (256x256) : imagenet1k_256
                - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                  imagenet21k_to_1k_256
                - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                  ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
                - ADE20K Dataset : ade20k_deeplabv3
                - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
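# Example invocation (illustrative; the script file name and all paths are placeholders):
#   python convert_mobilevitv2_original_to_hf.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256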
| 330 |
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status
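# Example (illustrative):
# >>> is_prime(2), is_prime(17), is_prime(18)
# (True, True, False)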
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: returns all prime numbers from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
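# Example (illustrative): sieve_er(10) zeroes out the composites in [2..10]:
# >>> sieve_er(10)
# [2, 3, 5, 7]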
def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
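# Example (illustrative). Note that `quotient /= factor` uses float division, so very
# large inputs can lose precision; for small numbers the prime factors come out exactly:
# >>> prime_factorization(60)
# [2, 2, 3, 5]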
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's assumption: every even number > 2 is the sum of two prime numbers."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and their sum must equal 'number'"

    return ans
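# Example (illustrative): the function returns the first pair it finds, scanning the
# primes in ascending order:
# >>> goldbach(28)
# [5, 23]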
def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Least common multiple (German: kleinstes gemeinsames Vielfaches) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
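# Example (illustrative): kg_v computes the least common multiple from the prime
# factorizations above:
# >>> kg_v(24, 36)
# 72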
def get_prime(n: int) -> int:
    """Returns the n-th prime number, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Returns all primes strictly between the prime numbers p_number_1 and p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
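# Example (illustrative): 28 is a perfect number because its proper divisors
# sum to the number itself: 1 + 2 + 4 + 7 + 14 == 28.
# >>> is_perfect_number(28)
# True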
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Returns the n-th Fibonacci number, with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
 | 262 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
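# Minimal usage sketch (illustrative; the 48 kHz stereo values below are assumptions
# for demonstration, not taken from a released checkpoint):
#
#     config = EncodecConfig(sampling_rate=48_000, audio_channels=2, normalize=True)
#     assert config.chunk_length is None  # chunk_length_s defaults to None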
| 16 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
 | 262 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"  # constant name assumed; not referenced below
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
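# Typical usage (illustrative), run from the root of the repo as the header comment says:
#   python utils/check_table.py                     # fails if the table is stale
#   python utils/check_table.py --fix_and_overwrite # rewrites docs/source/en/index.md in place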
| 105 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , ) | 262 | 0 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase when `use_pascal` is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 255 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 262 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 76 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
 | 262 | 0 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
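# Examples (illustrative): "A" -> 1, "Z" -> 26, "AA" -> 27.
# >>> excel_title_to_column("AB")
# 28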
if __name__ == "__main__":
from doctest import testmod
testmod()
| 25 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
@property
def lowercase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 262 | 0 |
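# Minimal sketch (added for illustration): the shortest-edge resize + center crop the
# tests above assert on, applied to one random image outside the test harness.
if is_torch_available() and is_vision_available():
    _demo_processor = MobileNetVaImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    _demo_image = Image.fromarray(np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8))
    print(_demo_processor(_demo_image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])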
def solution(max_perimeter: int = 10**9) -> int:
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    print(F'{solution() = }')
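    # Sanity check (added): the first two perimeters the loop yields are 16 and 50,
    # so solution(100) must equal 16 + 50 == 66.
    assert solution(100) == 66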
| 29 |
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''
    def __init__( self , size ) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update
    def left( self , idx ) -> int:
        return idx * 2
    def right( self , idx ) -> int:
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ) -> bool:
        # push a pending lazy assignment down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx , left_element , right_element , a , b ) -> int | float:
        # push a pending lazy assignment down before reading
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 262 | 0 |
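# Note (added): updates are lazy — update(1, 1, size, a, b, val) records val at the
# covering nodes and pushes it down only when a later query/update visits them. That is
# why query(1, 1, size, 1, 15) above returns 111 right after the range assignment to 1..3.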
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
__snake_case = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
__snake_case = 1024
__snake_case = 4096
__snake_case = 24
__snake_case = 16
__snake_case = [5, 11, 17, 23]
__snake_case = [256, 512, 1024, 1024]
__snake_case = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
__snake_case = 768
__snake_case = [1, 1, 1, 0.5]
__snake_case = [256, 512, 768, 768]
__snake_case = 150
__snake_case = 16
__snake_case = (1, 384, 384)
__snake_case = False
__snake_case = '''project'''
if "ade" in checkpoint_url:
__snake_case = True
__snake_case = 768
__snake_case = [1, 1, 1, 0.5]
__snake_case = 150
__snake_case = 16
__snake_case = '''huggingface/label-files'''
__snake_case = '''ade20k-id2label.json'''
__snake_case = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) ) , '''r''' ) )
__snake_case = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__snake_case = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
__snake_case = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
__snake_case = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
__snake_case = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
__snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
__snake_case = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
__snake_case = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
__snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__snake_case = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
__snake_case = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
__snake_case = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
__snake_case = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
__snake_case = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
__snake_case = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
__snake_case = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
__snake_case = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
__snake_case = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
__snake_case = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__snake_case = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
__snake_case = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
__snake_case = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
__snake_case = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
__snake_case = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
__snake_case = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__snake_case = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
__snake_case = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
__snake_case = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
__snake_case = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__snake_case = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
__snake_case = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
__snake_case = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
__snake_case = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
__snake_case = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
__snake_case = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
__snake_case = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
__snake_case = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
__snake_case = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
__snake_case = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
__snake_case = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
__snake_case = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
__snake_case = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
__snake_case = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
__snake_case = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
__snake_case = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
__snake_case = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
__snake_case = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
__snake_case = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
__snake_case = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
__snake_case = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def read_in_q_k_v(state_dict, config):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
__snake_case = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[: config.hidden_size, :]
__snake_case = in_proj_bias[: config.hidden_size]
__snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case = in_proj_weight[
-config.hidden_size :, :
]
__snake_case = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
__snake_case = get_dpt_config(lowerCAmelCase_ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__snake_case = torch.load(lowerCAmelCase_ , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(lowerCAmelCase_ )
# rename keys
for key in state_dict.copy().keys():
__snake_case = state_dict.pop(lowerCAmelCase_ )
__snake_case = val
# read in qkv matrices
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
__snake_case = DPTForSemanticSegmentation(lowerCAmelCase_ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# Check outputs on an image
__snake_case = 480 if '''ade''' in checkpoint_url else 384
__snake_case = DPTImageProcessor(size=lowerCAmelCase_ )
__snake_case = prepare_img()
__snake_case = image_processor(lowerCAmelCase_ , return_tensors='''pt''' )
# forward pass
__snake_case = model(**lowerCAmelCase_ ).logits if '''ade''' in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth
if show_prediction:
__snake_case = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=lowerCAmelCase_ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
snake_case_ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
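# Example invocation (added; the script filename and output directory are illustrative
# placeholders, not taken from the original):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large --show_prediction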
| 24 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite) | 262 | 0 |
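# Example usage (added), run from the repository root as the header comment notes:
#   python utils/check_table.py                      # fails if the table is stale
#   python utils/check_table.py --fix_and_overwrite  # rewrites the table in index.md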
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
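# Usage sketch (added for illustration; the checkpoint id comes from the maps above):
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encodings = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     # feed `encodings` to a DPRReader model, then rank answer candidates with
#     # tokenizer.decode_best_spans(encodings, model_outputs)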
| 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main() | 262 | 0 |
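# Example (added): once the package is installed, the `transformers-cli` console entry
# point dispatches to main(), so the registered subcommands run as, e.g.:
#   transformers-cli env
#   transformers-cli download bert-base-uncased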
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    '''simple docstring'''
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
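# Worked example (added): quick_select(items, k) returns the k-th smallest element, 0-based.
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    assert quick_select(data, 0) == 2     # minimum
    assert quick_select(data, 5) == 54    # sorted: [2, 4, 5, 7, 32, 54, 899]
    assert quick_select(data, 7) is None  # index out of range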
| 56 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : Tuple =None
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : Dict ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : int ={
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_UpperCAmelCase : Any =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : int = NllbTokenizer
SCREAMING_SNAKE_CASE__ : List[int] = []
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=False , **__lowercase , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase_ : List[Any] = legacy_behaviour
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , legacy_behaviour=__lowercase , **__lowercase , )
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : List[Any] = False if not self.vocab_file else True
lowerCAmelCase_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowerCAmelCase_ : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(__lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase_ : Any = src_lang if src_lang is not None else '''eng_Latn'''
lowerCAmelCase_ : str = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : int = self(__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase )
lowerCAmelCase_ : Dict = self.convert_tokens_to_ids(__lowercase )
lowerCAmelCase_ : List[Any] = tgt_lang_id
return inputs
def lowercase_ ( self , __lowercase , __lowercase = "eng_Latn" , __lowercase = None , __lowercase = "fra_Latn" , **__lowercase , ) -> BatchEncoding:
lowerCAmelCase_ : List[str] = src_lang
lowerCAmelCase_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
lowerCAmelCase_ : Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 262 | 0 |
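# Usage sketch (added for illustration; assumes the real-library name NllbTokenizerFast
# for the fast tokenizer class defined above):
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # assigning tokenizer.src_lang swaps the language-code special tokens via
#     # set_src_lang_special_tokens, as defined above.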
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
_UpperCAmelCase : List[Any] = dct.pop(UpperCamelCase__ )
_UpperCAmelCase : Dict = val
def lowerCamelCase_ (UpperCamelCase__ : Any , UpperCamelCase__ : str ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_UpperCAmelCase : Tuple = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
_UpperCAmelCase : Optional[int] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_UpperCAmelCase : str = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
_UpperCAmelCase : Any = qkv_bias
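# Note (added for clarity; not in the original script): EVA-style ViT attention in the
# LAVIS checkpoint stores separate q and v biases and no k bias, while the HF model uses
# one fused qkv bias; the zeros_like tensor above fills the k slot, giving [q, 0, v].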
def lowerCamelCase_ (UpperCamelCase__ : Dict ):
_UpperCAmelCase : Union[str, Any] = 364 if '''coco''' in model_name else 224
_UpperCAmelCase : Tuple = InstructBlipVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_UpperCAmelCase : Any = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_UpperCAmelCase : Union[str, Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_UpperCAmelCase : Any = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
_UpperCAmelCase : Optional[int] = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_UpperCAmelCase : Dict = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
_UpperCAmelCase : Tuple = InstructBlipConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ , qformer_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=False ):
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
_UpperCAmelCase : Tuple = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_UpperCAmelCase : Any = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
_UpperCAmelCase , _UpperCAmelCase : int = get_blipa_config(UpperCamelCase__ )
_UpperCAmelCase : List[str] = InstructBlipForConditionalGeneration(UpperCamelCase__ ).eval()
_UpperCAmelCase : int = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
_UpperCAmelCase , _UpperCAmelCase : Tuple = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
_UpperCAmelCase : List[str] = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
_UpperCAmelCase : Optional[int] = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
_UpperCAmelCase : int = original_model.state_dict()
_UpperCAmelCase : str = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase : int = state_dict.pop(UpperCamelCase__ )
if key.startswith('''Qformer.bert''' ):
_UpperCAmelCase : Optional[Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
_UpperCAmelCase : str = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
_UpperCAmelCase : Dict = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
_UpperCAmelCase : int = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
_UpperCAmelCase : Any = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
_UpperCAmelCase : Union[str, Any] = key.replace('''t5''' , '''language''' )
_UpperCAmelCase : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = load_demo_image()
_UpperCAmelCase : Optional[Any] = '''What is unusual about this image?'''
# create processor
_UpperCAmelCase : Union[str, Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = InstructBlipProcessor(
image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ , )
_UpperCAmelCase : Optional[int] = processor(images=UpperCamelCase__ , text=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
_UpperCAmelCase : int = vis_processors['''eval'''](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
_UpperCAmelCase : Dict = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "vicuna" in model_name:
_UpperCAmelCase : Dict = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
_UpperCAmelCase : Optional[int] = hf_model(**UpperCamelCase__ ).logits
else:
_UpperCAmelCase : List[str] = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
_UpperCAmelCase : Tuple = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(UpperCamelCase__ )
_UpperCAmelCase : Any = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
_UpperCAmelCase : Tuple = hf_model(**UpperCamelCase__ , labels=UpperCamelCase__ ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_UpperCAmelCase : Union[str, Any] = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase__ , atol=UpperCamelCase__ )
print('''Looks ok!''' )
print('''Generating with original model...''' )
_UpperCAmelCase : Union[str, Any] = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
_UpperCAmelCase : Optional[int] = hf_model.generate(
**UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_UpperCAmelCase : str = 2
print('''Original generation:''' , UpperCamelCase__ )
_UpperCAmelCase : Dict = processor.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : int = [text.strip() for text in output_text]
print('''HF generation:''' , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(F'Salesforce/{model_name}' )
hf_model.push_to_hub(F'Salesforce/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase :str = argparse.ArgumentParser()
_lowerCAmelCase :List[str] = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
        help='Name of the InstructBLIP model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_lowerCAmelCase :str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
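# Illustrative sketch (added; not part of the original converter): the conversion above
# reduces to renaming checkpoint keys before load_state_dict. A minimal, self-contained
# version of that pattern, with a made-up demo key, looks like this:
def apply_renames(state_dict: dict, rename_keys: list) -> dict:
    # pop each old key and re-insert its value under the new key
    for old, new in rename_keys:
        if old in state_dict:
            state_dict[new] = state_dict.pop(old)
    return state_dict

assert apply_renames(
    {"visual_encoder.cls_token": 0},
    [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")],
) == {"vision_model.embeddings.class_embedding": 0}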
| 263 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A ) -> None:
_UpperCAmelCase : set[int] = vertices
_UpperCAmelCase : dict[EdgeT, int] = {
(min(A ), max(A )): weight for edge, weight in edges.items()
}
def __lowerCAmelCase ( self , A , A ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_UpperCAmelCase : List[Any] = weight
def __lowerCAmelCase ( self ) -> Graph:
_UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_UpperCAmelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Optional[int] = weight
subgraph.add_edge(A , A )
return subgraph
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
            if adjacency_matrix[edgea][edgea] != "-":
                _UpperCAmelCase : Optional[Any] = int(adjacency_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCAmelCase :Any = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : Union[str, Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase__ , id=UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
_UpperCAmelCase : int = 0
# Doctest custom flag to ignore output.
_lowerCAmelCase :Dict = doctest.register_optionflag('IGNORE_RESULT')
_lowerCAmelCase :Union[str, Any] = doctest.OutputChecker
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __lowerCAmelCase ( self , A , A , A ) -> Tuple:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A , A , A )
_lowerCAmelCase :int = CustomOutputChecker
_lowerCAmelCase :List[Any] = HfDoctestModule
_lowerCAmelCase :Union[str, Any] = HfDocTestParser
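# Hedged aside (added; not in the original conftest): the IGNORE_RESULT flag registered
# above lets a doctest assert that a statement runs without comparing its output, e.g.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123
#
# because the custom OutputChecker short-circuits to True whenever the flag is set.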
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
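# Hedged usage sketch (added; not in the original file). Assuming this config class is
# exported as MgpstrConfig, constructing and serializing it would look like:
#
#   from transformers import MgpstrConfig
#   config = MgpstrConfig(image_size=[32, 128], max_token_length=27)
#   config.save_pretrained("./mgp-str-base")  # writes config.json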
| 263 | 1 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
_UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
_UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : int = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
_UpperCAmelCase : List[str] = config.model.params.first_stage_config.params
_UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params
_UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
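# Hedged usage note (added; not part of the original script): a typical run, with a
# hypothetical script filename and local paths, would be
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm.ckpt --config_path ./ldm_config.yaml --output_path ./ldm-pipeline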
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
def lowerCamelCase_ ():
_UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423]
_UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
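# Worked example (added for illustration): with the scores above, height = log2(8) = 3.
# The maximizing level above the leaves keeps max(90,23)=90, max(6,33)=33, max(21,65)=65
# and max(123,34423)=34423; the minimizing level keeps 33 and 65; the root maximizer
# therefore prints 65. A standard alpha-beta variant of the same recursion (a hedged
# sketch, reusing the module-level `import math`) prunes branches that cannot change
# that result:
def minimax_ab(depth, node_index, is_max, scores, height, alpha=-math.inf, beta=math.inf):
    if depth == height:
        return scores[node_index]
    children = (node_index * 2, node_index * 2 + 1)
    if is_max:
        best = -math.inf
        for child in children:
            best = max(best, minimax_ab(depth + 1, child, False, scores, height, alpha, beta))
            alpha = max(alpha, best)
            if beta <= alpha:
                break  # the minimizer above already has a better option
        return best
    best = math.inf
    for child in children:
        best = min(best, minimax_ab(depth + 1, child, True, scores, height, alpha, beta))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best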
| 263 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCamelCase_ (UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
_UpperCAmelCase : int = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
_UpperCAmelCase : List[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
_UpperCAmelCase : List[Any] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
_UpperCAmelCase : int = transform(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
return image
def lowerCamelCase_ (UpperCamelCase__ : str ):
if "visual_encoder" in key:
_UpperCAmelCase : Optional[int] = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , UpperCamelCase__ )
if "blocks" in key:
_UpperCAmelCase : Tuple = re.sub(r'''blocks''' , '''layers''' , UpperCamelCase__ )
if "attn" in key:
_UpperCAmelCase : Dict = re.sub(r'''attn''' , '''self_attn''' , UpperCamelCase__ )
if "norm1" in key:
_UpperCAmelCase : Any = re.sub(r'''norm1''' , '''layer_norm1''' , UpperCamelCase__ )
if "norm2" in key:
_UpperCAmelCase : Union[str, Any] = re.sub(r'''norm2''' , '''layer_norm2''' , UpperCamelCase__ )
if "encoder.norm" in key:
_UpperCAmelCase : List[Any] = re.sub(r'''encoder.norm''' , '''post_layernorm''' , UpperCamelCase__ )
if "encoder.patch_embed.proj" in key:
_UpperCAmelCase : List[str] = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , UpperCamelCase__ )
if "encoder.pos_embed" in key:
_UpperCAmelCase : Union[str, Any] = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , UpperCamelCase__ )
if "encoder.cls_token" in key:
_UpperCAmelCase : Tuple = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , UpperCamelCase__ )
if "self_attn" in key:
_UpperCAmelCase : Union[str, Any] = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , UpperCamelCase__ )
return key
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : str=None ):
if config_path is not None:
_UpperCAmelCase : Tuple = BlipConfig.from_pretrained(UpperCamelCase__ )
else:
_UpperCAmelCase : Any = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
_UpperCAmelCase : Union[str, Any] = BlipForConditionalGeneration(UpperCamelCase__ ).eval()
_UpperCAmelCase : int = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
_UpperCAmelCase : Dict = blip_decoder(pretrained=UpperCamelCase__ , image_size=384 , vit='''base''' )
_UpperCAmelCase : Any = pt_model.eval()
_UpperCAmelCase : Optional[Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase : Any = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase : Dict = rename_key(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = value
hf_model.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = 384
_UpperCAmelCase : List[Any] = load_demo_image(image_size=UpperCamelCase__ , device='''cpu''' )
_UpperCAmelCase : Any = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCAmelCase : int = tokenizer(['''a picture of'''] ).input_ids
_UpperCAmelCase : List[str] = hf_model.generate(UpperCamelCase__ , UpperCamelCase__ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
_UpperCAmelCase : List[Any] = hf_model.generate(UpperCamelCase__ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(UpperCamelCase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase : List[Any] = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
_UpperCAmelCase : Tuple = blip_vqa(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='''base''' )
vqa_model.eval()
_UpperCAmelCase : List[str] = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase : List[str] = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase : str = rename_key(UpperCamelCase__ )
_UpperCAmelCase : Tuple = value
_UpperCAmelCase : Dict = BlipForQuestionAnswering(UpperCamelCase__ )
hf_vqa_model.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = ['''How many dogs are in this image?''']
_UpperCAmelCase : Tuple = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids
_UpperCAmelCase : Any = hf_vqa_model.generate(UpperCamelCase__ , UpperCamelCase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
_UpperCAmelCase : Optional[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
_UpperCAmelCase : str = blip_itm(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='''base''' )
itm_model.eval()
_UpperCAmelCase : Tuple = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase : List[str] = modified_state_dict.pop(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = rename_key(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = value
_UpperCAmelCase : Union[str, Any] = BlipForImageTextRetrieval(UpperCamelCase__ )
_UpperCAmelCase : List[str] = ['''A picture of a woman with a dog sitting in a beach''']
_UpperCAmelCase : str = tokenizer(
UpperCamelCase__ , return_tensors='''pt''' , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(UpperCamelCase__ )
hf_itm_model.eval()
_UpperCAmelCase : Tuple = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
_UpperCAmelCase : Tuple = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
_lowerCAmelCase :Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_lowerCAmelCase :int = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
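# Hedged illustration (added; not in the original script): rename_key rewrites checkpoint
# keys with chained regex substitutions, e.g.
#   "visual_encoder.blocks.0.attn.qkv.weight"
# becomes, after the visual_encoder, blocks and attn rules fire in order,
#   "vision_model.encoder.layers.0.self_attn.qkv.weight"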
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
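# Hedged usage sketch (added; not part of the test file): the dual-guided call exercised
# above, written as application code; model download and fp16 CUDA execution are assumed.
#
#   import torch
#   from diffusers import VersatileDiffusionPipeline
#   pipe = VersatileDiffusionPipeline.from_pretrained(
#       "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe.dual_guided(
#       prompt="cyberpunk 2077", image=init_image,  # init_image: a PIL.Image
#       text_to_image_strength=0.75, num_inference_steps=50,
#   ).images[0]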
| 263 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( *A , **A ) -> List[Any]:
pass
def lowerCamelCase_ (UpperCamelCase__ : Image ):
_UpperCAmelCase : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def lowerCamelCase_ (UpperCamelCase__ : Image ):
_UpperCAmelCase : Optional[int] = np.array(UpperCamelCase__ )
_UpperCAmelCase : Tuple = npimg.shape
return {"hash": hashimage(UpperCamelCase__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a__ =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
a__ =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self , A , A , A ) -> str:
_UpperCAmelCase : int = MaskGenerationPipeline(model=A , image_processor=A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self , A , A ) -> Optional[Any]:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __lowerCAmelCase ( self ) -> Any:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Optional[int] = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_UpperCAmelCase : Union[str, Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_5_6 )
# Shortening by hashing
_UpperCAmelCase : int = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : int = '''facebook/sam-vit-huge'''
_UpperCAmelCase : Optional[int] = pipeline('''mask-generation''' , model=A )
_UpperCAmelCase : Optional[Any] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
_UpperCAmelCase : Tuple = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_053},
] , )
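# Hedged usage sketch (added; not part of the test file): the same pipeline outside the
# test harness; the SAM checkpoint download and a GPU are assumed, so this is
# illustrative only.
#
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-huge", device=0)
#   outputs = generator(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
#   )
#   masks, scores = outputs["masks"], outputs["scores"]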
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
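# Hedged usage sketch (added; not part of the test suite): the slow test above maps to
# roughly this standalone usage; weight downloads and CUDA are assumed.
#
#   import torch
#   from diffusers import StableDiffusionAttendAndExcitePipeline
#   pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe(
#       prompt="a painting of an elephant with glasses",
#       token_indices=[5, 7],  # prompt tokens whose attention is maximized
#       guidance_scale=7.5,
#       num_inference_steps=50,
#   ).images[0]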
| 263 | 1 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
_UpperCAmelCase , _UpperCAmelCase : Tuple = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
_UpperCAmelCase : Optional[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
_UpperCAmelCase : int = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
        # if the new index's result extends the interval further to the right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
_UpperCAmelCase , _UpperCAmelCase : str = i, i + z_result[i] - 1
return z_result
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : str ):
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : Dict = 0
# concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
_UpperCAmelCase : Dict = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index marks the starting position of a substring
        # equal to the pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
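# Hedged refinement (added; not in the original file): concatenating pattern + input_str
# directly can over-count when the pattern overlaps its own prefix (e.g. pattern "aa" in
# text "a" yields 1 instead of 0), because indices inside the pattern region are counted
# too. The standard remedy is a sentinel character absent from both strings; this sketch
# assumes the first function above is exported as z_function, as its call site suggests.
def find_pattern_with_sentinel(pattern: str, text: str) -> int:
    z_result = z_function(pattern + "\x00" + text)
    # only indices past the sentinel can be genuine occurrences inside `text`
    return sum(1 for i, val in enumerate(z_result) if i > len(pattern) and val >= len(pattern))

# e.g. find_pattern_with_sentinel("aa", "a") == 0, while the version above returns 1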
| 263 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
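# Hedged usage sketch (added; not part of the test file): the iterator-streamer pattern
# these tests exercise, written as application code.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#   tok = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
#   inputs = tok("Hello", return_tensors="pt")
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)  # text arrives as tokens are generated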
| 263 | 1 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
_UpperCAmelCase : str = str(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = ''''''.join(sorted(UpperCamelCase__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def lowerCamelCase_ (UpperCamelCase__ : float = 99 ):
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Optional[int] = 1
while True:
if check_bouncy(UpperCamelCase__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ (UpperCamelCase__ : float ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0]
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ):
return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
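# Worked example (added for illustration): the integral above is Euler's gamma function,
# Gamma(z) = integral_0^inf x**(z - 1) * exp(-x) dx, so Gamma(n) = (n - 1)! for positive
# integers; evaluating the first function above at 5 should give ~24.0, and at 0.5
# it should give sqrt(pi) ~= 1.7724.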
| 263 | 1 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
_UpperCAmelCase : List[str] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_UpperCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
_UpperCAmelCase : Optional[int] = np.concatenate(UpperCamelCase__ , axis=0 )
_UpperCAmelCase : Dict = np.array(UpperCamelCase__ ).astype(np.floataa ) / 255.0
_UpperCAmelCase : Any = image.transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase : Any = 2.0 * image - 1.0
_UpperCAmelCase : Dict = torch.from_numpy(UpperCamelCase__ )
elif isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase : Dict = torch.cat(UpperCamelCase__ , dim=0 )
return image
def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]=0.9995 ):
if not isinstance(UpperCamelCase__ , np.ndarray ):
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Optional[int] = va.device
_UpperCAmelCase : str = va.cpu().numpy()
_UpperCAmelCase : Any = va.cpu().numpy()
_UpperCAmelCase : Union[str, Any] = np.sum(va * va / (np.linalg.norm(UpperCamelCase__ ) * np.linalg.norm(UpperCamelCase__ )) )
if np.abs(UpperCamelCase__ ) > DOT_THRESHOLD:
_UpperCAmelCase : Optional[int] = (1 - t) * va + t * va
else:
_UpperCAmelCase : Union[str, Any] = np.arccos(UpperCamelCase__ )
_UpperCAmelCase : int = np.sin(UpperCamelCase__ )
_UpperCAmelCase : Tuple = theta_a * t
_UpperCAmelCase : Optional[int] = np.sin(UpperCamelCase__ )
_UpperCAmelCase : int = np.sin(theta_a - theta_t ) / sin_theta_a
_UpperCAmelCase : Optional[Any] = sin_theta_t / sin_theta_a
_UpperCAmelCase : List[Any] = sa * va + sa * va
if inputs_are_torch:
_UpperCAmelCase : Tuple = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
return va
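# Note (added for clarity; not in the original pipeline): the function above is spherical
# linear interpolation (slerp) between two vectors, falling back to plain linear
# interpolation when they are nearly parallel (dot product above the 0.9995 threshold);
# t=0 returns the first vector, t=1 the second, and intermediate t traces the great circle.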
def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : int ):
_UpperCAmelCase : Any = F.normalize(UpperCamelCase__ , dim=-1 )
_UpperCAmelCase : List[Any] = F.normalize(UpperCamelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
for param in model.parameters():
_UpperCAmelCase : Optional[Any] = value
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> Any:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_UpperCAmelCase : int = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_UpperCAmelCase : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def __lowerCAmelCase ( self , A = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(A )
def __lowerCAmelCase ( self ) -> int:
set_requires_grad(self.vae , A )
def __lowerCAmelCase ( self ) -> Tuple:
set_requires_grad(self.vae , A )
def __lowerCAmelCase ( self ) -> str:
set_requires_grad(self.unet , A )
def __lowerCAmelCase ( self ) -> int:
set_requires_grad(self.unet , A )
def __lowerCAmelCase ( self , A , A , A ) -> Optional[Any]:
# get the original timestep using init_timestep
_UpperCAmelCase : Any = min(int(num_inference_steps * strength ) , A )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowerCAmelCase ( self , A , A , A , A , A , A=None ) -> str:
if not isinstance(A , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(A )}' )
_UpperCAmelCase : Optional[int] = image.to(device=A , dtype=A )
if isinstance(A , A ):
_UpperCAmelCase : Union[str, Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_UpperCAmelCase : str = torch.cat(A , dim=0 )
else:
_UpperCAmelCase : int = self.vae.encode(A ).latent_dist.sample(A )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCAmelCase : Optional[Any] = 0.18_215 * init_latents
_UpperCAmelCase : Any = init_latents.repeat_interleave(A , dim=0 )
_UpperCAmelCase : int = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_UpperCAmelCase : Dict = self.scheduler.add_noise(A , A , A )
_UpperCAmelCase : int = init_latents
return latents
def __lowerCAmelCase ( self , A ) -> str:
_UpperCAmelCase : List[str] = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCAmelCase : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_UpperCAmelCase : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __lowerCAmelCase ( self , A , A ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = self.feature_extractor.preprocess(A )
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_UpperCAmelCase : Optional[Any] = self.clip_model.get_image_features(A )
_UpperCAmelCase : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_UpperCAmelCase : str = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , ) -> Tuple:
_UpperCAmelCase : Optional[Any] = latents.detach().requires_grad_()
_UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_UpperCAmelCase : List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_UpperCAmelCase : List[str] = self.scheduler.alphas_cumprod[timestep]
_UpperCAmelCase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCAmelCase : Optional[Any] = torch.sqrt(A )
_UpperCAmelCase : Any = pred_original_sample * (fac) + latents * (1 - fac)
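# fac = sqrt(1 - alpha_prod_t) is close to 1 early in sampling, so CLIP guidance sees
# mostly the predicted clean image while the latents are still noisy, and mostly the
# raw latents near the end.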
elif isinstance(self.scheduler , A ):
_UpperCAmelCase : List[str] = self.scheduler.sigmas[index]
_UpperCAmelCase : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCAmelCase : Optional[int] = 1 / 0.18_215 * sample
_UpperCAmelCase : int = self.vae.decode(A ).sample
_UpperCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : Union[str, Any] = transforms.Resize(self.feature_extractor_size )(A )
_UpperCAmelCase : Union[str, Any] = self.normalize(A ).to(latents.dtype )
_UpperCAmelCase : Dict = self.clip_model.get_image_features(A )
_UpperCAmelCase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_UpperCAmelCase : Union[str, Any] = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_UpperCAmelCase : Dict = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_UpperCAmelCase : Dict = latents.detach() + grads * (sigma**2)
_UpperCAmelCase : Tuple = noise_pred_original
else:
_UpperCAmelCase : List[Any] = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , A , A , A = None , A = None , A = 5_1_2 , A = 5_1_2 , A = 0.6 , A = 5_0 , A = 7.5 , A = 1 , A = 0.0 , A = 1_0_0 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> Dict:
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(A )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_UpperCAmelCase : Any = [generator] + [None] * (batch_size - 1)
_UpperCAmelCase : Tuple = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_UpperCAmelCase : Dict = [x[0] for x in coca_is_none if x[1]]
_UpperCAmelCase : Tuple = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
f'Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
_UpperCAmelCase : str = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
_UpperCAmelCase : Any = self.get_image_description(A )
# get prompt text embeddings for content and style
_UpperCAmelCase : Any = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_UpperCAmelCase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_UpperCAmelCase : Optional[Any] = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_UpperCAmelCase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_UpperCAmelCase : int = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_UpperCAmelCase : Any = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_UpperCAmelCase : Any = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_UpperCAmelCase : List[Any] = {}
if accepts_offset:
_UpperCAmelCase : Tuple = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(A , A , self.device )
_UpperCAmelCase : Optional[Any] = timesteps[:1].repeat(A )
# Preprocess image
_UpperCAmelCase : Union[str, Any] = preprocess(A , A , A )
_UpperCAmelCase : Dict = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_UpperCAmelCase : Tuple = preprocess(A , A , A )
_UpperCAmelCase : List[str] = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_UpperCAmelCase : Optional[int] = slerp(A , A , A )
if clip_guidance_scale > 0:
_UpperCAmelCase : Dict = self.get_clip_image_embeddings(A , A )
_UpperCAmelCase : List[Any] = self.get_clip_image_embeddings(A , A )
_UpperCAmelCase : Dict = slerp(
A , A , A )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCAmelCase : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase : Dict = content_text_input.input_ids.shape[-1]
_UpperCAmelCase : Tuple = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_UpperCAmelCase : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCAmelCase : Optional[Any] = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCAmelCase : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCAmelCase : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCAmelCase : int = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_UpperCAmelCase : Optional[Any] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_UpperCAmelCase : int = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase : Union[str, Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
_UpperCAmelCase : Union[str, Any] = eta
# check if the scheduler accepts generator
_UpperCAmelCase : Any = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_UpperCAmelCase : Optional[int] = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_UpperCAmelCase : Tuple = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : int = noise_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCAmelCase : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCAmelCase , _UpperCAmelCase : str = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCAmelCase : Any = 1 / 0.18_215 * latents
_UpperCAmelCase : Any = self.vae.decode(A ).sample
_UpperCAmelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 263 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the values of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
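# Usage sketch (the obfuscated helper name is kept as-is):
# lowerCamelCase_(25, 32) # -> '0b111001', i.e. bin(25 | 32)
# lowerCamelCase_(0, 0) # -> '0b0'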
| 263 | 1 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A , A = None , A = None , A = None , A = False , A = False , A = None , A = None , **A , ) -> List[Any]:
super().__init__(
A , split=A , features=A , cache_dir=A , keep_in_memory=A , streaming=A , num_proc=A , **A , )
_UpperCAmelCase : Optional[Any] = field
_UpperCAmelCase : Optional[Any] = path_or_paths if isinstance(A , A ) else {self.split: path_or_paths}
_UpperCAmelCase : Union[str, Any] = Json(
cache_dir=A , data_files=A , features=A , field=A , **A , )
def __lowerCAmelCase ( self ) -> Tuple:
# Build iterable dataset
if self.streaming:
_UpperCAmelCase : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : int = None
_UpperCAmelCase : int = None
self.builder.download_and_prepare(
download_config=A , download_mode=A , verification_mode=A , base_path=A , num_proc=self.num_proc , )
_UpperCAmelCase : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=A , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A , A = None , A = None , **A , ) -> List[str]:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
_UpperCAmelCase : Union[str, Any] = dataset
_UpperCAmelCase : Tuple = path_or_buf
_UpperCAmelCase : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCAmelCase : int = num_proc
_UpperCAmelCase : int = '''utf-8'''
_UpperCAmelCase : Any = to_json_kwargs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[Any] = self.to_json_kwargs.pop('''path_or_buf''' , A )
_UpperCAmelCase : Tuple = self.to_json_kwargs.pop('''orient''' , '''records''' )
_UpperCAmelCase : Dict = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
_UpperCAmelCase : Any = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
_UpperCAmelCase : int = self.to_json_kwargs.pop('''compression''' , A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=A ) as buffer:
_UpperCAmelCase : Optional[int] = self._write(file_obj=A , orient=A , lines=A , index=A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
''' was passed. Please provide a local path instead.''' )
_UpperCAmelCase : List[Any] = self._write(
file_obj=self.path_or_buf , orient=A , lines=A , index=A , **self.to_json_kwargs )
return written
def __lowerCAmelCase ( self , A ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = args
_UpperCAmelCase : int = query_table(
table=self.dataset.data , key=slice(A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCAmelCase : Dict = batch.to_pandas().to_json(
path_or_buf=A , orient=A , lines=A , index=A , **A )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def __lowerCAmelCase ( self , A , A , A , A , **A , ) -> int:
_UpperCAmelCase : Tuple = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCAmelCase : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(A )
else:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A , A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(A )
return written
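# Usage sketch (assuming this is the `datasets` JsonDatasetWriter; the call below uses
# the upstream Dataset.to_json API, which drives this writer, not the obfuscated names):
# from datasets import Dataset
# ds = Dataset.from_dict({"a": [1, 2, 3]})
# written_bytes = ds.to_json("out.jsonl", num_proc=2)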
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase :Union[str, Any] = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :List[str] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
a__ =RobertaTokenizer
def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> Dict:
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
_UpperCAmelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space:
_UpperCAmelCase : Any = getattr(A , pre_tok_state.pop('''type''' ) )
_UpperCAmelCase : Dict = add_prefix_space
_UpperCAmelCase : int = pre_tok_class(**A )
_UpperCAmelCase : List[str] = add_prefix_space
_UpperCAmelCase : Dict = '''post_processor'''
_UpperCAmelCase : Optional[Any] = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
_UpperCAmelCase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase : int = tuple(state['''sep'''] )
if "cls" in state:
_UpperCAmelCase : Any = tuple(state['''cls'''] )
_UpperCAmelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , A ) != add_prefix_space:
_UpperCAmelCase : List[str] = add_prefix_space
_UpperCAmelCase : List[str] = True
if state.get('''trim_offsets''' , A ) != trim_offsets:
_UpperCAmelCase : List[Any] = trim_offsets
_UpperCAmelCase : List[str] = True
if changes_to_apply:
_UpperCAmelCase : Tuple = getattr(A , state.pop('''type''' ) )
_UpperCAmelCase : Dict = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , A ) -> List[Any]:
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
_UpperCAmelCase : Tuple = value
def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding:
_UpperCAmelCase : List[str] = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding:
_UpperCAmelCase : List[Any] = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
_UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def __lowerCAmelCase ( self , A , A=None ) -> List[Any]:
_UpperCAmelCase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 263 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
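# Worked example of the offset: spm assigns ',' id 3, so its fairseq id is 3 + 1 = 4,
# leaving ids 0-3 for '<s>', '<pad>', '</s>', '<unk>'; the extra '<mask>' slot is
# appended after the whole spm vocabulary (see the vocab_size property below).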
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
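# e.g. a single sequence becomes `<s> A </s>` and a pair becomes
# `<s> A </s></s> B </s>` (XLM-R uses a double separator between the two segments).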
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A , A , A , A = None , ) -> Any:
super().__init__()
self.register_modules(transformer=A , vae=A , scheduler=A )
# create an imagenet -> id dictionary for easier use
_UpperCAmelCase : List[Any] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
_UpperCAmelCase : Tuple = int(A )
_UpperCAmelCase : Union[str, Any] = dict(sorted(self.labels.items() ) )
def __lowerCAmelCase ( self , A ) -> List[int]:
if not isinstance(A , A ):
_UpperCAmelCase : str = list(A )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , A , A = 4.0 , A = None , A = 5_0 , A = "pil" , A = True , ) -> Union[ImagePipelineOutput, Tuple]:
_UpperCAmelCase : Optional[int] = len(A )
_UpperCAmelCase : Dict = self.transformer.config.sample_size
_UpperCAmelCase : List[str] = self.transformer.config.in_channels
_UpperCAmelCase : List[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A , device=self.device , dtype=self.transformer.dtype , )
_UpperCAmelCase : str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_UpperCAmelCase : List[str] = torch.tensor(A , device=self.device ).reshape(-1 )
_UpperCAmelCase : Tuple = torch.tensor([1_0_0_0] * batch_size , device=self.device )
_UpperCAmelCase : List[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_UpperCAmelCase : Tuple = latent_model_input[: len(A ) // 2]
_UpperCAmelCase : int = torch.cat([half, half] , dim=0 )
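# Re-duplicating the first half keeps both halves of the batch identical, so the real
# class labels and the null class (concatenated above) are conditioned on the same
# latents; one forward pass then yields both predictions, split apart further down.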
_UpperCAmelCase : List[Any] = self.scheduler.scale_model_input(A , A )
_UpperCAmelCase : List[str] = t
if not torch.is_tensor(A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_UpperCAmelCase : Any = latent_model_input.device.type == '''mps'''
if isinstance(A , A ):
_UpperCAmelCase : Dict = torch.floataa if is_mps else torch.floataa
else:
_UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
_UpperCAmelCase : Optional[int] = torch.tensor([timesteps] , dtype=A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_UpperCAmelCase : Tuple = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase : Dict = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_UpperCAmelCase : Dict = self.transformer(
A , timestep=A , class_labels=A ).sample
# perform guidance
if guidance_scale > 1:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = torch.split(A , len(A ) // 2 , dim=0 )
_UpperCAmelCase : List[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_UpperCAmelCase : Tuple = torch.cat([half_eps, half_eps] , dim=0 )
_UpperCAmelCase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = torch.split(A , A , dim=1 )
else:
_UpperCAmelCase : List[str] = noise_pred
# compute previous image: x_t -> x_t-1
_UpperCAmelCase : List[Any] = self.scheduler.step(A , A , A ).prev_sample
if guidance_scale > 1:
_UpperCAmelCase , _UpperCAmelCase : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
_UpperCAmelCase : Tuple = latent_model_input
_UpperCAmelCase : int = 1 / self.vae.config.scaling_factor * latents
_UpperCAmelCase : List[Any] = self.vae.decode(A ).sample
_UpperCAmelCase : Optional[int] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCAmelCase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : List[Any] = self.numpy_to_pil(A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=A )
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :Any = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''git_vision_model'''
def __init__( self , A=7_6_8 , A=3_0_7_2 , A=1_2 , A=1_2 , A=3 , A=2_2_4 , A=1_6 , A="quick_gelu" , A=1E-5 , A=0.0 , A=0.02 , **A , ) -> Optional[int]:
super().__init__(**A )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Optional[int] = hidden_act
@classmethod
def __lowerCAmelCase ( cls , A , **A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
_UpperCAmelCase : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''git'''
def __init__( self , A=None , A=3_0_5_2_2 , A=7_6_8 , A=6 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=1_0_2_4 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=True , A=False , A=1_0_1 , A=1_0_2 , A=None , **A , ) -> Any:
super().__init__(bos_token_id=A , eos_token_id=A , pad_token_id=A , **A )
if vision_config is None:
_UpperCAmelCase : Dict = {}
logger.info('''vision_config is None. Initializing the GitVisionConfig with default values.''' )
_UpperCAmelCase : Any = GitVisionConfig(**A )
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Optional[int] = position_embedding_type
_UpperCAmelCase : Optional[Any] = use_cache
_UpperCAmelCase : List[str] = tie_word_embeddings
_UpperCAmelCase : int = num_image_with_embedding
_UpperCAmelCase : Union[str, Any] = bos_token_id
_UpperCAmelCase : List[str] = eos_token_id
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Dict = self.vision_config.to_dict()
_UpperCAmelCase : List[str] = self.__class__.model_type
return output
| 263 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
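# Sketch of the expected file format (an assumption based on the parsing above and the
# original LUKE release): one "entity_name<TAB>count" pair per line, e.g. "[MASK]\t0",
# with the zero-based line index becoming the entity id.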
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :Dict = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Union[str, Any] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
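# Usage sketch (the parameter tree is illustrative): calling the function above on a
# flax params pytree returns a frozen pytree of PartitionSpec leaves, e.g. P('mp', None)
# for the token-embedding kernel and None for layer-norm scales, ready to pass to pjit
# as in_axis_resources.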
| 263 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = str(id_ )
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = []
_UpperCAmelCase : Optional[Any] = {} # {vertex:distance}
def __lt__( self , A ) -> List[Any]:
return self.key < other.key
def __repr__( self ) -> int:
return self.id
def __lowerCAmelCase ( self , A ) -> List[str]:
self.neighbors.append(A )
def __lowerCAmelCase ( self , A , A ) -> Any:
_UpperCAmelCase : Dict = weight
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , UpperCamelCase__ )
graph[b - 1].add_edge(graph[a - 1] , UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : Vertex ):
_UpperCAmelCase : str = []
for u in graph:
_UpperCAmelCase : Optional[int] = math.inf
_UpperCAmelCase : int = None
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = graph[:]
while q:
_UpperCAmelCase : int = min(UpperCamelCase__ )
q.remove(UpperCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_UpperCAmelCase : Optional[Any] = u
_UpperCAmelCase : str = u.edges[v.id]
for i in range(1 , len(UpperCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : Vertex ):
for u in graph:
_UpperCAmelCase : Any = math.inf
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : List[str] = list(UpperCamelCase__ )
hq.heapify(UpperCamelCase__ )
while h:
_UpperCAmelCase : Union[str, Any] = hq.heappop(UpperCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_UpperCAmelCase : int = u
_UpperCAmelCase : str = u.edges[v.id]
hq.heapify(UpperCamelCase__ )
for i in range(1 , len(UpperCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCamelCase_ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
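# Usage sketch (in the un-obfuscated original the helpers are connect(), prim() and
# prim_heap(); the shared obfuscated name lowerCamelCase_ shadows them here):
# g = [Vertex(i) for i in range(3)] # obfuscated class name: _UpperCAmelCase
# connect(g, 1, 2, 5); connect(g, 2, 3, 2)
# prim(g, g[0]) # -> [(2, 1), (3, 2)]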
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio clip of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :List[Any] = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
_lowerCAmelCase :Dict = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''simple docstring'''

    def __init__(self) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
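# Usage sketch (editorial, illustrative only) of the helpers above, mirroring
# what the tests below exercise:
#
#     accelerator = Accelerator()
#     model = DummyModel()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     train_dl, _ = dummy_dataloaders()
#     model, optimizer, train_dl = accelerator.prepare(model, optimizer, train_dl)
#     train(1, model, train_dl, optimizer, accelerator)
#     accelerator.save_state('ckpt')   # snapshot model/optimizer/RNG state
#     accelerator.load_state('ckpt')   # restore it later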
class CheckpointTest(unittest.TestCase):
    '''simple docstring'''
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the last `total_limit=2` should survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))
    @require_cuda
    def test_map_location(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
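# Usage sketch (editorial; mirrors the example in _KWARGS_DESCRIPTION above):
#     cuad_metric = datasets.load_metric('cuad')
#     results = cuad_metric.compute(predictions=predictions, references=references)
#     # -> {'exact_match': ..., 'f1': ..., 'aupr': ..., 'prec_at_80_recall': ..., 'prec_at_90_recall': ...}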
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
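# Example (editorial, illustrative): a two-state chain that alternates with
# certainty between 'a' and 'b'.
#     transitions = [('a', 'b', 1.0), ('b', 'a', 1.0)]
#     counts = get_transitions('a', transitions, steps=10)
#     # Counter seeds every node with 1, so counts['a'] + counts['b'] == 12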
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        outputs = text_classifier('This is great !', top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}])
        outputs = text_classifier(['This is great !', 'This is bad'], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ],
        )
        outputs = text_classifier('This is great !', top_k=1)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        # Legacy behavior
        outputs = text_classifier('This is great !', return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        outputs = text_classifier('This is great !', return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]])
        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ],
        )
        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {'label': 'LABEL_0', 'score': 0.504},
                {'label': 'LABEL_0', 'score': 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task='text-classification',
            model='hf-internal-testing/tiny-random-distilbert',
            framework='pt',
            device=torch.device('cpu'),
        )
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline('text-classification')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline('text-classification', framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{'label': ANY(str), 'score': ANY(float)}])
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{'label': ANY(str), 'score': ANY(float)}, {'label': ANY(str), 'score': ANY(float)}],
        )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values())
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{'label': ANY(str), 'score': ANY(float)}] * N, [{'label': ANY(str), 'score': ANY(float)}] * N],
        )
        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {'label': ANY(str), 'score': ANY(float)},
        )
        self.assertTrue(outputs['label'] in model.config.id2label.values())
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]])
        self.assertEqual(
            nested_simplify(outputs),
            [{'label': ANY(str), 'score': ANY(float)}],
        )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 263 | 1 |
"""simple docstring"""
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()
def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
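# Programmatic example (editorial, illustrative): 3 vertices with edges
# 0->1 (weight 5) and 1->2 (weight 3); relaxation finds 0->2 with cost 8.
#     INF = float('inf')
#     g = [[0.0, 5, INF], [INF, 0.0, 3], [INF, INF, 0.0]]
#     dist, _ = floyd_warshall(g, 3)   # dist[0][2] == 8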
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = int(input('Enter number of vertices: '))
_lowerCAmelCase :List[Any] = int(input('Enter number of edges: '))
_lowerCAmelCase :Optional[int] = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
_lowerCAmelCase :List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
_lowerCAmelCase :Any = int(input('Enter source:'))
_lowerCAmelCase :str = int(input('Enter destination:'))
_lowerCAmelCase :Any = float(input('Enter weight:'))
_lowerCAmelCase :Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 263 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
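    """
    Count the number of set bits in a non-negative integer using Brian
    Kernighan's trick: `n &= n - 1` clears the lowest set bit, so the loop
    runs once per 1-bit. (Docstring and doctests added editorially; they are
    run by the doctest call at the bottom of this module.)

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    >>> get_set_bits_count(21)
    3
    >>> get_set_bits_count(58)
    4
    >>> get_set_bits_count(0)
    0
    >>> get_set_bits_count(256)
    1
    """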
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
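# Usage sketch (editorial, illustrative; needs network access or a local
# sentencepiece model):
#     tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
#     ids = tokenizer('Hello world')['input_ids']
#     ids[0], ids[-1]                           # (0, 2): <s> ... </s>
#     tokenizer.convert_ids_to_tokens(ids[0])   # '<s>'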
| 263 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
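# CLI usage sketch (editorial; the script filename is hypothetical, the flags
# come from the argparse block below):
#     python convert_ldm_original.py \
#         --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline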
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 1 |
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
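# Example (editorial, illustrative):
#     ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#     ps.get_sum(1, 3)               # 2 + 3 + 4 == 9
#     ps.contains_sum(6)             # True: the subarray [1, 2, 3] sums to 6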
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
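# Usage sketch (editorial, illustrative):
#     config = FalconConfig()   # defaults: hidden_size=4544, 71 attention heads
#     config.head_dim           # 4544 // 71 == 64
#     config.rotary             # True, because alibi defaults to False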
| 263 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .poker_hand import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 263 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
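# Usage sketch (editorial): running this module rewrites each downloaded
# DialoGPT fairseq-style checkpoint ({size}_ft.pkl), renaming
# 'lm_head.decoder.weight' to 'lm_head.weight' so it loads as a GPT-2 LM head.
# The --dialogpt_path flag comes from the argparse block below; the script
# filename in any invocation is up to you.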
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_lowerCAmelCase :str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
_lowerCAmelCase :int = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCAmelCase :Optional[Any] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=init_image,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='numpy',
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 263 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    '''simple docstring'''

    def __init__(self, vertices, edges) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
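# Example (editorial, illustrative): a weighted triangle. Prim's keeps the
# weight-1 and weight-2 edges, saving 3 over the full graph.
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     mst = g.prims_algorithm()
#     sum(g.edges.values()) - sum(mst.edges.values())   # 3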
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results: str):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
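
# Illustrative sanity check (not part of the original script): a typical pytest
# summary line parses into (failed, passed, time_spent).
assert handle_test_results("== 1 failed, 2 passed in 3.4s ==") == (1, 2, "3.4s")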
def extract_first_line_failure(failures_short_lines: str):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by post(); post_reply() refuses to run before then.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # Reads the module-level doc_test_results built in the __main__ block below.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(artifact_path: str):
    _artifact = {}
    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file), encoding="utf-8") as f:
                    # Keyed by the file's base name; reconstructed from the call
                    # sites below, which index the artifact by report name.
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
_lowerCAmelCase :Optional[int] = get_job_links()
_lowerCAmelCase :Any = retrieve_available_artifacts()
_lowerCAmelCase :List[Any] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowerCAmelCase :List[str] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowerCAmelCase :str = github_actions_job_links.get('run_doctests')
_lowerCAmelCase :List[Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
_lowerCAmelCase :List[Any] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :int = handle_test_results(artifact['stats'])
_lowerCAmelCase :Tuple = failed
_lowerCAmelCase :Optional[Any] = success
_lowerCAmelCase :Optional[Any] = time_spent[1:-1] + ', '
_lowerCAmelCase :Tuple = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
_lowerCAmelCase :List[str] = line.replace('FAILED ', '')
_lowerCAmelCase :List[Any] = line.split()[0].replace('\n', '')
if "::" in line:
_lowerCAmelCase,_lowerCAmelCase :Optional[Any] = line.split('::')
else:
_lowerCAmelCase,_lowerCAmelCase :List[str] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowerCAmelCase :Optional[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowerCAmelCase :Tuple = all_failures[test] if test in all_failures else 'N/A'
_lowerCAmelCase :int = failure
break
_lowerCAmelCase :Any = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    """Configuration class for the MGP-STR scene-text recognition model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
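
if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): build a
    # default config and override a single field.
    config = MgpstrConfig(max_token_length=32)
    print(config.model_type, config.hidden_size)  # mgp-str 768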
| 263 | 1 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure qubit 0 on a simulator and return the counts histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A=None , A=None , A=None , A="resnet50" , A=3 , A=3_2 , A=3 , A=True , A=True , ) -> List[str]:
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
_UpperCAmelCase : Dict = stage_names
_UpperCAmelCase : List[str] = out_features
_UpperCAmelCase : List[str] = backbone
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : Optional[int] = image_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : List[Any] = use_pretrained_backbone
_UpperCAmelCase : List[Any] = is_training
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self ) -> str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCAmelCase ( self , A , A ) -> Dict:
_UpperCAmelCase : Optional[int] = TimmBackbone(config=A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : int = model(A )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase : str = config_and_inputs
_UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(TimmBackbone,) if is_torch_available() else ()
a__ ={'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
a__ =False
a__ =False
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Any = TimmBackboneModelTester(self )
_UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , has_text_modality=A )
def __lowerCAmelCase ( self ) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = '''resnet18'''
_UpperCAmelCase : int = '''microsoft/resnet-18'''
_UpperCAmelCase : List[str] = AutoBackbone.from_pretrained(A , use_timm_backbone=A )
_UpperCAmelCase : int = AutoBackbone.from_pretrained(A )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_UpperCAmelCase : Any = AutoBackbone.from_pretrained(A , use_timm_backbone=A , out_indices=[1, 2, 3] )
_UpperCAmelCase : Any = AutoBackbone.from_pretrained(A , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self ) -> Any:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self ) -> int:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) -> int:
pass
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(A )
_UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
_UpperCAmelCase : Dict = self.all_model_classes[0]
_UpperCAmelCase : List[Any] = model_class(A )
model.to(A )
_UpperCAmelCase : str = self._prepare_for_class(A , A )
_UpperCAmelCase : str = model(**A )
_UpperCAmelCase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
_UpperCAmelCase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_UpperCAmelCase : str = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.eval()
_UpperCAmelCase : str = model(**A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_UpperCAmelCase : int = copy.deepcopy(A )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(**A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_UpperCAmelCase : Optional[Any] = copy.deepcopy(A )
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = model_class(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[Any] = model(**A )
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , A=None , A=None , **A ) -> Dict:
_UpperCAmelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A , )
_UpperCAmelCase : Any = kwargs.pop('''feature_extractor''' )
_UpperCAmelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A , A )
def __call__( self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
_UpperCAmelCase : str = self.image_processor(images=A , return_tensors=A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A , A ):
_UpperCAmelCase : Dict = [text] # add batch dimension (as the image processor always adds a batch dimension)
_UpperCAmelCase : Tuple = features['''words''']
_UpperCAmelCase : List[Any] = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel values
_UpperCAmelCase : Union[str, Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_UpperCAmelCase : Optional[int] = self.get_overflowing_images(A , encoded_inputs['''overflow_to_sample_mapping'''] )
_UpperCAmelCase : Optional[int] = images
return encoded_inputs
def __lowerCAmelCase ( self , A , A ) -> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_UpperCAmelCase : Optional[int] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(A ) != len(A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(A )} and {len(A )}' )
return images_with_overflow
def __lowerCAmelCase ( self , *A , **A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*A , **A )
def __lowerCAmelCase ( self , *A , **A ) -> Tuple:
return self.tokenizer.decode(*A , **A )
@property
def __lowerCAmelCase ( self ) -> Tuple:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __lowerCAmelCase ( self ) -> str:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , )
return self.image_processor
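
# Illustrative usage sketch (assuming the public transformers API that this
# module mirrors); guarded because it downloads a checkpoint and needs Pillow.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    image = Image.new("RGB", (224, 224), "white")
    words = ["hello", "world"]  # made-up example words and 0-1000 scale boxes
    boxes = [[10, 10, 50, 20], [60, 10, 110, 20]]
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']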
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
_UpperCAmelCase : Optional[Any] = version
_UpperCAmelCase : int = max_n_lyric_tokens
_UpperCAmelCase : Optional[Any] = n_genres
with open(A , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase : int = json.load(A )
with open(A , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase : List[Any] = json.load(A )
with open(A , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase : Tuple = json.load(A )
_UpperCAmelCase : List[Any] = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
_UpperCAmelCase : Tuple = oov.replace(r'''\-\'''' , r'''\-+\'''' )
_UpperCAmelCase : int = regex.compile(A )
_UpperCAmelCase : List[str] = {v: k for k, v in self.artists_encoder.items()}
_UpperCAmelCase : Dict = {v: k for k, v in self.genres_encoder.items()}
_UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __lowerCAmelCase ( self ) -> Tuple:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __lowerCAmelCase ( self ) -> List[str]:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def __lowerCAmelCase ( self , A , A , A ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
_UpperCAmelCase : Optional[Any] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
_UpperCAmelCase : Tuple = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_UpperCAmelCase : Any = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __lowerCAmelCase ( self , A ) -> Optional[int]:
return list(A )
def __lowerCAmelCase ( self , A , A , A , **A ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = self.prepare_for_tokenization(A , A , A )
_UpperCAmelCase : Union[str, Any] = self._tokenize(A )
return artist, genre, lyrics
def __lowerCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_UpperCAmelCase : str = artists[idx].lower()
_UpperCAmelCase : Optional[Any] = [genres[idx].lower()]
else:
_UpperCAmelCase : Optional[int] = self._normalize(artists[idx] ) + '''.v2'''
_UpperCAmelCase : Dict = [
self._normalize(A ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_UpperCAmelCase : Dict = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
_UpperCAmelCase : List[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
_UpperCAmelCase : str = {vocab[index]: index + 1 for index in range(len(A ) )}
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Optional[Any] = len(A ) + 1
_UpperCAmelCase : Dict = self.vocab
_UpperCAmelCase : Any = {v: k for k, v in self.vocab.items()}
_UpperCAmelCase : List[Any] = ''''''
else:
_UpperCAmelCase : List[str] = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
_UpperCAmelCase : Dict = self._run_strip_accents(A )
_UpperCAmelCase : List[Any] = lyrics.replace('''\\''' , '''\n''' )
_UpperCAmelCase : int = self.out_of_vocab.sub('''''' , A ), [], []
return artists, genres, lyrics
def __lowerCAmelCase ( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = unicodedata.normalize('''NFD''' , A )
_UpperCAmelCase : int = []
for char in text:
_UpperCAmelCase : str = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def __lowerCAmelCase ( self , A ) -> str:
_UpperCAmelCase : Any = (
[chr(A ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(A ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(A ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
_UpperCAmelCase : Union[str, Any] = frozenset(A )
_UpperCAmelCase : List[Any] = re.compile(r'''_+''' )
_UpperCAmelCase : List[Any] = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
_UpperCAmelCase : List[Any] = pattern.sub('''_''' , A ).strip('''_''' )
return text
def __lowerCAmelCase ( self , A ) -> str:
return " ".join(A )
def __lowerCAmelCase ( self , A , A = None , A = False ) -> Any:
# Convert to TensorType
if not isinstance(A , A ):
_UpperCAmelCase : Optional[Any] = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
_UpperCAmelCase : Union[str, Any] = tf.constant
_UpperCAmelCase : Union[str, Any] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
_UpperCAmelCase : Dict = torch.tensor
_UpperCAmelCase : Optional[int] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
_UpperCAmelCase : Optional[Any] = jnp.array
_UpperCAmelCase : List[Any] = _is_jax
else:
_UpperCAmelCase : Tuple = np.asarray
_UpperCAmelCase : List[str] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_UpperCAmelCase : List[str] = [inputs]
if not is_tensor(A ):
_UpperCAmelCase : Tuple = as_tensor(A )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
_UpperCAmelCase : List[Any] = [0, 0, 0]
_UpperCAmelCase : Dict = [artist] * len(self.version )
_UpperCAmelCase : Any = [genres] * len(self.version )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = self.tokenize(A , A , A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = self._convert_token_to_id(A , A , A )
_UpperCAmelCase : List[str] = [-INFINITY] * len(full_tokens[-1] )
_UpperCAmelCase : List[Any] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : int = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
_UpperCAmelCase : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def __lowerCAmelCase ( self , A , A , A ) -> str:
_UpperCAmelCase : List[Any] = self.artists_decoder.get(A )
_UpperCAmelCase : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
_UpperCAmelCase : str = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
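
# Illustrative usage sketch (assuming the public transformers JukeboxTokenizer
# API that this module mirrors); guarded because it downloads vocab files.
if __name__ == "__main__":
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    tokens = tokenizer("Alan Jackson", "Country Rock", "old town road")
    print(len(tokens["input_ids"]))  # one entry per prior in the model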
| 263 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 1 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Evaluate the gamma function by numeric integration of its definition."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
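    # Illustrative check (not in the original): for positive integers,
    # gamma(n) == (n - 1)!, so this prints a value very close to 24.0.
    print(f"{gamma(5) = }")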
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
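
# Small illustrative check (not in the original): ucal(1.5, 3) expands to
# 1.5 * (1.5 - 1) * (1.5 - 2) = -0.375.
assert abs(ucal(1.5, 3) - (-0.375)) < 1e-12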
def lowerCamelCase_ ():
_UpperCAmelCase : Optional[int] = int(input('''enter the numbers of values: ''' ) )
_UpperCAmelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
_UpperCAmelCase : Dict = 0
print('''enter the values of parameters in a list: ''' )
_UpperCAmelCase : Tuple = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
_UpperCAmelCase : int = float(input() )
_UpperCAmelCase : List[str] = int(input('''enter the value to interpolate: ''' ) )
_UpperCAmelCase : Optional[int] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
_UpperCAmelCase : List[Any] = y[j + 1][i - 1] - y[j][i - 1]
_UpperCAmelCase : str = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
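# A non-interactive sketch of the same forward-difference scheme (my example, not
# part of the original script): interpolating f(x) = x**2 from four equally spaced samples.
def newton_forward(x: list[float], fx: list[float], value: float) -> float:
    n = len(x)
    table = [fx[:]]  # row 0 holds the sampled function values
    for i in range(1, n):
        table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])
    u = (value - x[0]) / (x[1] - x[0])
    total, coeff = table[0][0], 1.0
    for i in range(1, n):
        coeff *= (u - (i - 1)) / i  # accumulates u(u-1)...(u-i+1) / i!
        total += coeff * table[i][0]
    return total  # newton_forward([1, 2, 3, 4], [1, 4, 9, 16], 2.5) == 6.25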
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
def binary_or(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
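# Quick equivalence check (illustrative): the per-character construction above must
# agree with Python's built-in bitwise OR.
def _check_binary_or() -> None:
    for a, b in [(25, 32), (0, 0), (37, 50)]:
        assert binary_or(a, b) == bin(a | b)  # e.g. binary_or(25, 32) == "0b111001"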
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
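# The file above is Transformers' standard lazy-import scaffold: heavy submodules are
# only imported on first attribute access. A dependency-free sketch of the same idea
# (illustrative; the real _LazyModule has considerably more machinery):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}
        self.__all__ = list(self._symbol_to_module)
    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs at most once per symbol
        return value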
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """A weighted, undirected graph."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
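# The inner loop of prims_algorithm rescans every edge for each vertex added, i.e.
# O(V * E) overall. A heap-based sketch of the same algorithm for comparison
# (illustrative, not part of the solution above):
import heapq
def prim_mst_weight(n: int, adj: dict[int, list[tuple[int, int]]]) -> int:
    """adj maps vertex -> list of (weight, neighbor); returns the total MST weight."""
    seen = {0}
    heap = list(adj[0])
    heapq.heapify(heap)
    total = 0
    while heap and len(seen) < n:
        weight, vertex = heapq.heappop(heap)
        if vertex in seen:
            continue
        seen.add(vertex)
        total += weight
        for candidate in adj[vertex]:
            if candidate[1] not in seen:
                heapq.heappush(heap, candidate)
    return total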
| 263 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """Adapted from RobertaTokenizer and XLNetTokenizer. Based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
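# Worked example of the fairseq/SPM alignment handled above (my illustration, not
# part of the tokenizer): SentencePiece id 3 (the first "real" piece, ",") becomes
# fairseq id 3 + 1 = 4, while SentencePiece's unk id 0 is remapped to fairseq's <unk> at 3.
def _spm_id_to_fairseq_id(spm_id: int, offset: int = 1, unk_id: int = 3) -> int:
    return spm_id + offset if spm_id else unk_id
# _spm_id_to_fairseq_id(3) == 4; _spm_id_to_fairseq_id(0) == 3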
| 263 | 1 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
_lowerCAmelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
_lowerCAmelCase :List[Any] = parser.parse_args()
_lowerCAmelCase :List[str] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_lowerCAmelCase :Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_lowerCAmelCase :Tuple = extract_warnings(args.output_dir, args.targets)
_lowerCAmelCase :Tuple = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
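# The class above follows the usual "rename with a deprecation window" pattern: the
# old import path keeps working but warns on every instantiation. A generic sketch
# of the same idea with hypothetical names:
class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size
class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)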
| 263 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 263 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges so the QKV projections can share one fused GEMM."""
    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")
    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
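# fuse_qkv above forces a single quantization range (amax) across the Q/K/V
# quantizers so the three projections can share one fused int8 kernel. A
# dependency-free sketch of the resulting step size (illustrative; the real amax
# values live on pytorch_quantization TensorQuantizer buffers):
def fused_scale(amax_q: float, amax_k: float, amax_v: float, num_bits: int = 8) -> float:
    amax = max(amax_q, amax_k, amax_v)
    return amax / (2 ** (num_bits - 1) - 1)  # one shared int8 step size
# fused_scale(1.5, 2.0, 0.7) == 2.0 / 127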
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
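# Tiny illustration (my example, not in the original file) of how _match walks a
# flattened parameter key: the rule tuple must fully match a contiguous window of
# the key, element by element, as anchored regexes.
def _check_match() -> None:
    key = ("transformer", "h", "0", "mlp", "c_fc", "kernel")
    assert _match(("mlp", "c_fc", "kernel"), key)
    assert not _match(("mlp", "c_proj", "kernel"), key)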
| 263 | 1 |
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))
def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
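# Round-trip sanity check (illustrative): Ascii85 maps every 4 input bytes to 5
# printable characters, and the encode/decode pair above must be mutual inverses.
def _check_base85() -> None:
    for text in ["Hello World!", "", "ünïcode"]:
        assert base85_decode(base85_encode(text)) == text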
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 263 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with."
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`."
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y = mx + b"
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
def __lowerCAmelCase ( self ) -> Any:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : str = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
_UpperCAmelCase : Any = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' ,)
@require_torch
@require_vision
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = PixaStructImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_convert_rgb''' ) )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : int = self.image_processor_tester.prepare_dummy_image()
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : Any = 2_0_4_8
_UpperCAmelCase : Optional[Any] = image_processor(A , return_tensors='''pt''' , max_patches=A )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processor
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
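        # The "+ 2" term is the two patch coordinates (row and column index)
        # prepended to every flattened patch (an inference from the shape
        # arithmetic above, not an upstream comment).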
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processor
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
_UpperCAmelCase : Optional[Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(A ):
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
_UpperCAmelCase : Union[str, Any] = '''Hello'''
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A , header_text=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : int = image_processor(
A , return_tensors='''pt''' , max_patches=A , header_text=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> str:
# Initialize image_processor
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
_UpperCAmelCase : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processor
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' ,)
@require_torch
@require_vision
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 )
_UpperCAmelCase : Optional[Any] = 3
@property
def __lowerCAmelCase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_convert_rgb''' ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processor
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Any = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
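# Lazy-import sketch (an assumption about _LazyModule semantics): an import such
# as `from transformers.models.squeezebert import SqueezeBertModel` resolves on
# first attribute access, so the torch-gated branch above only runs when needed.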
| 263 | 1 |
"""simple docstring"""
from manim import *
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Any = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = VGroup(*A ).arrange(A , buff=0 )
_UpperCAmelCase : str = VGroup(*A ).arrange(A , buff=0 )
_UpperCAmelCase : Tuple = VGroup(A , A ).arrange(A , buff=0 )
_UpperCAmelCase : str = Text('''CPU''' , font_size=2_4 )
_UpperCAmelCase : List[Any] = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A )
_UpperCAmelCase : Tuple = [mem.copy() for i in range(1 )]
_UpperCAmelCase : List[Any] = VGroup(*A ).arrange(A , buff=0 )
_UpperCAmelCase : Union[str, Any] = Text('''GPU''' , font_size=2_4 )
_UpperCAmelCase : Optional[int] = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
gpu.align_to(A , A )
gpu.set_x(gpu.get_x() - 1 )
self.add(A )
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[int] = VGroup(*A ).arrange(A , buff=0 )
_UpperCAmelCase : Tuple = Text('''Model''' , font_size=2_4 )
_UpperCAmelCase : Optional[Any] = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
model.move_to([3, -1.0, 0] )
self.play(
Create(A , run_time=1 ) , Create(A , run_time=1 ) , Create(A , run_time=1 ) , )
_UpperCAmelCase : Tuple = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
_UpperCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : List[Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=2.5 ) , Write(A ) , Write(A ) )
self.add(A )
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[Any] = []
for i, rect in enumerate(A ):
_UpperCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A , opacity=0.7 )
cpu_target.move_to(A )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.46 / 4
_UpperCAmelCase : str = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=A , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=A , buff=0.0 )
cpu_targs.append(A )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A ) )
second_animations.append(MoveToTarget(A , run_time=1.5 ) )
self.play(*A )
self.play(*A )
self.wait()
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :Dict = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
_lowerCAmelCase :List[str] = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
_lowerCAmelCase :Optional[Any] = '▁'
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Optional[int] = vocab_file
_UpperCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_UpperCAmelCase : str = len(self.sp_model ) - 1
_UpperCAmelCase : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
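    # Layout note (derived from the methods below, not an upstream docstring):
    # a single sequence becomes "<s> X </s>" and a pair becomes
    # "<s> A </s></s> B </s>", mirroring the RoBERTa/CamemBERT format.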
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
_UpperCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
_UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Tuple:
return len(self.sp_model )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Union[str, Any] = self.sp_model.PieceToId(A )
return spm_id if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A )
def __lowerCAmelCase ( self , A ) -> Dict:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[int] = ''''''
_UpperCAmelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(A )
_UpperCAmelCase : str = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def __getstate__( self ) -> List[Any]:
_UpperCAmelCase : Dict = self.__dict__.copy()
_UpperCAmelCase : Optional[int] = None
return state
def __setstate__( self , A ) -> str:
_UpperCAmelCase : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : Optional[int] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : str = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
_UpperCAmelCase : Tuple = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , ) # "J'aime le camembert !"
_UpperCAmelCase : Dict = model(A )['''last_hidden_state''']
_UpperCAmelCase : int = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , A )
# compare the actual values for a slice.
_UpperCAmelCase : Optional[Any] = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 263 |
"""simple docstring"""
def lowerCamelCase_ (number : int ):
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
        # Clearing the lowest set bit jumps straight to the next 1 instead of
        # inspecting every bit, so the loop runs once per set bit rather than
        # 32 times.
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
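    # Quick sanity check (a sketch): 25 == 0b11001 has three set bits, and each
    # `number &= number - 1` step clears exactly one of them.
    assert lowerCamelCase_(25 ) == 3
    assert lowerCamelCase_(0 ) == 0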
| 263 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_lowerCAmelCase :Any = None
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase :str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase :Dict = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_lowerCAmelCase :Union[str, Any] = {
'camembert-base': 512,
}
_lowerCAmelCase :Optional[int] = '▁'
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
a__ =CamembertTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=["<s>NOTUSED", "</s>NOTUSED"] , **A , ) -> List[str]:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase : Union[str, Any] = vocab_file
        _UpperCAmelCase : Union[str, Any] = bool(self.vocab_file )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Optional[int] = [self.cls_token_id]
_UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 263 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
_UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
_UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : int = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
_UpperCAmelCase : List[str] = config.model.params.first_stage_config.params
_UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params
_UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
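# Typical invocation (a sketch; the script name and paths are hypothetical):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm-pipeline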
| 263 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
_lowerCAmelCase :Tuple = parser.parse_args()
_lowerCAmelCase :Tuple = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
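# Typical invocation (a sketch; the script name and paths are hypothetical):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./sd-v1-4.ckpt --dump_path ./sd-1-4-diffusers --half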
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
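# Derived values (a sketch from the definitions above): with the defaults, the
# first property yields head_dim = 4544 // 71 = 64, and the second is True
# exactly when `alibi` is False, i.e. when rotary position embeddings are used.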
| 263 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
_lowerCAmelCase :Union[str, Any] = ''
_lowerCAmelCase :Union[str, Any] = ''
_lowerCAmelCase :int = ''
_lowerCAmelCase :Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCamelCase_ ():
_UpperCAmelCase , _UpperCAmelCase : Tuple = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCAmelCase : List[Any] = random_chars(32 )
_UpperCAmelCase : Tuple = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
_UpperCAmelCase : Dict = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(F'{file_root}.jpg' , UpperCamelCase__ , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(UpperCamelCase__ )} with {file_name}' )
_UpperCAmelCase : List[str] = []
for anno in new_annos[index]:
_UpperCAmelCase : Dict = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(UpperCamelCase__ )
        with open(F'{file_root}.txt' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
_UpperCAmelCase : List[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
_UpperCAmelCase : Tuple = in_file.readlines()
_UpperCAmelCase : Optional[Any] = os.path.join(UpperCamelCase__ , F'{label_name}.jpg' )
_UpperCAmelCase : Any = []
for obj_list in obj_lists:
_UpperCAmelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCamelCase_ (UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : str = []
_UpperCAmelCase : str = []
for idx in range(len(UpperCamelCase__ ) ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = img_list[idx]
path_list.append(UpperCamelCase__ )
_UpperCAmelCase : int = anno_list[idx]
        _UpperCAmelCase : Any = cv2.imread(UpperCamelCase__ )
if flip_type == 1:
            _UpperCAmelCase : Dict = cv2.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
_UpperCAmelCase : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
            _UpperCAmelCase : str = cv2.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
_UpperCAmelCase : Any = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
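# Flip math (a note derived from the loops above): boxes are normalized YOLO
# tuples (label, x_center, y_center, width, height), so a horizontal flip only
# needs x_center -> 1 - x_center and a vertical flip y_center -> 1 - y_center;
# widths and heights are unchanged.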
def lowerCamelCase_ (UpperCamelCase__ : int = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCAmelCase : Optional[Any] = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 263 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowerCAmelCase :int = ['small', 'medium', 'large']
_lowerCAmelCase :int = 'lm_head.decoder.weight'
_lowerCAmelCase :Dict = 'lm_head.weight'
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ )
_UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_lowerCAmelCase :str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
_lowerCAmelCase :int = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
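# Typical invocation (a sketch; the script name and path are hypothetical):
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_pkls
# Each "{size}_ft.pkl" checkpoint has "lm_head.decoder.weight" renamed to
# "lm_head.weight" and is re-saved under ./DialoGPT-{size}/ as WEIGHTS_NAME.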
| 263 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A , A=7 , A=3 , A=1_0 , A=1_8 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=None , ) -> Dict:
_UpperCAmelCase : Any = size if size is not None else {'''shortest_edge''': 1_8}
_UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : Any = num_channels
_UpperCAmelCase : Optional[Any] = num_frames
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : List[Any] = min_resolution
_UpperCAmelCase : Dict = max_resolution
_UpperCAmelCase : Any = do_resize
_UpperCAmelCase : Tuple = size
_UpperCAmelCase : Tuple = do_normalize
_UpperCAmelCase : Optional[int] = image_mean
_UpperCAmelCase : int = image_std
_UpperCAmelCase : Dict = crop_size
def __lowerCAmelCase ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''do_center_crop''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
_UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def __lowerCAmelCase ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
_UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 263 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A ) -> None:
_UpperCAmelCase : set[int] = vertices
_UpperCAmelCase : dict[EdgeT, int] = {
(min(A ), max(A )): weight for edge, weight in edges.items()
}
def __lowerCAmelCase ( self , A , A ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_UpperCAmelCase : List[Any] = weight
def __lowerCAmelCase ( self ) -> Graph:
_UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_UpperCAmelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Optional[int] = weight
subgraph.add_edge(A , A )
return subgraph
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
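# Minimal sanity check (a sketch with toy data, not the Project Euler input):
# for the triangle graph with edges {(0, 1): 1, (1, 2): 2, (0, 2): 3}, Prim's
# algorithm keeps (0, 1) and (1, 2), so the saving is 6 - 3 = 3.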
| 263 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _UpperCAmelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[int]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=A , )
def __lowerCAmelCase ( self , A , A ) -> str:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def __lowerCAmelCase ( self , A , A ) -> Optional[Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A )
class _UpperCAmelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=A , )
def __lowerCAmelCase ( self , A , A ) -> Optional[int]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def __lowerCAmelCase ( self , A , A ) -> str:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A )
def lowerCamelCase_ ():
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def lowerCamelCase_ ():
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class _UpperCAmelCase ( a ):
'''simple docstring'''
@require_beam
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : str = DummyBeamDataset(cache_dir=A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_UpperCAmelCase : Optional[Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __lowerCAmelCase ( self ) -> Optional[int]:
import apache_beam as beam
_UpperCAmelCase : Any = beam.io.parquetio.WriteToParquet
_UpperCAmelCase : Optional[int] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : Optional[Any] = DummyBeamDataset(cache_dir=A , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
_UpperCAmelCase : Union[str, Any] = partial(A , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                            A , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_UpperCAmelCase : Tuple = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __lowerCAmelCase ( self ) -> str:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : List[str] = DummyBeamDataset(cache_dir=A )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : Optional[int] = NestedBeamDataset(cache_dir=A , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
_UpperCAmelCase : str = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , A )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , A )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration for the MGP-STR scene-text recognition model."""

    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
                 num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False,
                 layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0,
                 output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 263 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # A leaf has been reached: return its static score
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
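# Worked example: with scores [3, 5, 2, 9] and height 2, the maximizing root evaluates
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3, so minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.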
| 263 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
                 activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
                 bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
                 scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
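# Minimal usage sketch (assuming the standard transformers API around this config):
# from transformers import GPTBigCodeConfig, GPTBigCodeModel
# config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
# model = GPTBigCodeModel(config)  # small, randomly initialized model for experiments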
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")
@property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors,
                padding=padding, truncation=truncation)
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors,
                    padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE)
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
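    # End-user sketch of this pipeline (assuming the standard `transformers.pipeline` factory):
    # from transformers import pipeline
    # classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    # classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
    # # -> {"sequence": ..., "labels": ["travel", ...], "scores": [...]}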
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type="numpy").images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
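    # Attend-and-Excite strengthens cross-attention on the tokens named by `token_indices`,
    # which index into the tokenized prompt. A quick way to find them (a sketch):
    # ids = pipe.tokenizer("a painting of an elephant with glasses").input_ids
    # pipe.tokenizer.convert_ids_to_tokens(ids)  # "elephant" and "glasses" sit at positions 5 and 7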
| 263 | 1 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
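# Quick sanity check on a small graph (an assumed example adjacency matrix):
# graph = [
#     [0, 1, 0, 1, 0],
#     [1, 0, 1, 1, 1],
#     [0, 1, 0, 0, 1],
#     [1, 1, 0, 0, 1],
#     [0, 1, 1, 1, 0],
# ]
# hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]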
| 263 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
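    # Typical TextIteratorStreamer usage outside of tests (a sketch of the documented pattern):
    # streamer = TextIteratorStreamer(tokenizer)
    # thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer, "max_new_tokens": 20})
    # thread.start()
    # for text in streamer:
    #     print(text, end="")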
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
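# With the lazy structure above, `import transformers.models.deberta` stays cheap: the torch-
# and TF-specific submodules are only imported when one of their exported names is first accessed.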
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    # Γ(num) = ∫₀^∞ x^(num - 1) · e^(-x) dx, evaluated numerically with scipy's quad
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
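# Quick check: Γ(n) = (n - 1)! for positive integers, so gamma(5) should evaluate
# to ~24.0 and gamma(1) to ~1.0, up to quad's numerical tolerance.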
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
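    # End-to-end sketch of the translation under test (assuming the documented MBart usage):
    # tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
    # batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
    # ids = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro").generate(**batch)
    # tok.batch_decode(ids, skip_special_tokens=True)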
| 263 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
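# Worked example: binary_or(25, 32) == "0b111001"
# (25 = 0b011001 and 32 = 0b100000; OR-ing the zero-filled digits gives 0b111001 = 57).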
| 263 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
def __lowerCAmelCase ( self , A , A , A ) -> Callable[[], None]:
_UpperCAmelCase : Dict = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_UpperCAmelCase : Dict = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase : Tuple = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase : Optional[Any] = __import__('''transformers''' , fromlist=[model_class] )
_UpperCAmelCase : Any = getattr(A , A )
_UpperCAmelCase : Any = model_cls(A )
except ImportError:
raise ImportError(
f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_UpperCAmelCase : str = TF_MODEL_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase : Optional[Any] = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
_UpperCAmelCase : List[Any] = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A , decoder_input_ids=A , training=A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A , training=A )
_UpperCAmelCase : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowerCAmelCase ( self , A , A , A ) -> Callable[[], None]:
_UpperCAmelCase : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_UpperCAmelCase : List[Any] = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase : Optional[int] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase : Any = __import__('''transformers''' , fromlist=[model_class] )
_UpperCAmelCase : List[str] = getattr(A , A )
_UpperCAmelCase : Union[str, Any] = model_cls(A )
except ImportError:
raise ImportError(
f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_UpperCAmelCase : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase : str = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
_UpperCAmelCase : int = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_UpperCAmelCase : Tuple = model(A , decoder_input_ids=A , labels=A , training=A )[0]
_UpperCAmelCase : Union[str, Any] = tf.gradients(A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_UpperCAmelCase : Dict = model(A , labels=A , training=A )[0]
_UpperCAmelCase : List[Any] = tf.gradients(A , model.trainable_variables )
return gradients
_UpperCAmelCase : Any = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def __lowerCAmelCase ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]))

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
_UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : int = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : Any = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
_UpperCAmelCase : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : Optional[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : Tuple = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
_UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 263 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
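# For example, spm assigns "," the id 3; adding fairseq_offset (1) yields 4,
# which is the id of "," in the fairseq vocab shown in the table above.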
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
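# Builds model inputs in the XLM-R / RoBERTa format:
# single sequence: <s> X </s>
# pair of sequences: <s> A </s></s> B </s>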
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
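# In the returned mask, 1 marks special tokens and 0 marks sequence tokens,
# mirroring the special-token layout produced by the method above.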
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> str:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : int = inspect.getfile(accelerate.test_utils )
_UpperCAmelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
_UpperCAmelCase : List[str] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : List[Any] = f'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
_UpperCAmelCase : Any = [sys.executable] + distributed_args
execute_subprocess_async(A , env=os.environ.copy() )
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A ) -> Optional[int]:
super().__init__()
_UpperCAmelCase : Dict = nn.ModuleList(A )
def __lowerCAmelCase ( self , A , A , A , A , A , A = None , A = None , A = None , A = None , A = False , A = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
if i == 0:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = down_samples, mid_sample
else:
_UpperCAmelCase : Any = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A , A )
]
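# Residual samples from successive controlnets are accumulated elementwise,
# so every controlnet must emit tensors of matching shapes.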
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self , A , A = True , A = None , A = False , A = None , ) -> List[str]:
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Optional[Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
_UpperCAmelCase : List[Any] = model_path_to_save + f'_{idx}'
@classmethod
def __lowerCAmelCase ( cls , A , **A ) -> List[str]:
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Any = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_UpperCAmelCase : Optional[int] = pretrained_model_path
while os.path.isdir(A ):
_UpperCAmelCase : Tuple = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
_UpperCAmelCase : List[str] = pretrained_model_path + f'_{idx}'
logger.info(f'{len(A )} controlnets loaded from {pretrained_model_path}.' )
if len(A ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(A )
| 263 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
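# The rows for the new <ent> and <ent2> tokens are seeded from the embeddings
# of "@" and "#" respectively before being concatenated onto the word embeddings.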
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
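# In the original conversion script these three copies initialize the separate
# word-to-entity, entity-to-word and entity-to-entity query projections.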
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 1 |
"""simple docstring"""
import numpy as np
import qiskit
def lowerCamelCase_ (UpperCamelCase__ : int = 8 , UpperCamelCase__ : int | None = None ):
_UpperCAmelCase : List[Any] = np.random.default_rng(seed=UpperCamelCase__ )
# Alice's and Bob's measurement bases match for roughly half of the qubits, and
# only those qubits contribute to the key, so we prepare more qubits than we need.
_UpperCAmelCase : int = 6 * key_len
# Measurement basis for Alice's qubits.
_UpperCAmelCase : str = rng.integers(2 , size=UpperCamelCase__ )
# The set of states Alice will prepare.
_UpperCAmelCase : Tuple = rng.integers(2 , size=UpperCamelCase__ )
# Measurement basis for Bob's qubits.
_UpperCAmelCase : Optional[int] = rng.integers(2 , size=UpperCamelCase__ )
# Quantum Circuit to simulate BB84
_UpperCAmelCase : Optional[int] = qiskit.QuantumCircuit(UpperCamelCase__ , name='''BB84''' )
# Alice prepares her qubits: apply X when her state bit is 1, then H when her basis bit is 1 (Hadamard basis).
for index, _ in enumerate(UpperCamelCase__ ):
if alice_state[index] == 1:
bbaa_circ.x(UpperCamelCase__ )
if alice_basis[index] == 1:
bbaa_circ.h(UpperCamelCase__ )
bbaa_circ.barrier()
# Bob rotates with H when his basis bit is 1, so the final computational-basis measurement matches his chosen basis.
for index, _ in enumerate(UpperCamelCase__ ):
if bob_basis[index] == 1:
bbaa_circ.h(UpperCamelCase__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_UpperCAmelCase : List[Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# One shot suffices: with a fixed simulator seed the measurement outcome is
# deterministic, so additional shots would reproduce the same key.
_UpperCAmelCase : List[Any] = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=1 , seed_simulator=UpperCamelCase__ )
# Returns the result of measurement.
_UpperCAmelCase : Union[str, Any] = job.result().get_counts(UpperCamelCase__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_UpperCAmelCase : Union[str, Any] = ''''''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_UpperCAmelCase : Any = gen_key[:key_len] if len(UpperCamelCase__ ) >= key_len else gen_key.ljust(UpperCamelCase__ , '''0''' )
return key
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
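# Example (hypothetical): the query ("mlp", "c_fc", "kernel") matches the key
# ("transformer", "h", "0", "mlp", "c_fc", "kernel"), since each compiled pattern
# fully matches the corresponding element of a contiguous window of the key.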
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 263 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :List[Any] = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''roformer'''
def __init__( self , A=5_0_0_0_0 , A=None , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=1_5_3_6 , A=2 , A=0.02 , A=1E-12 , A=0 , A=False , A=True , **A , ) -> int:
super().__init__(pad_token_id=A , **A )
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : int = intermediate_size
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Dict = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_vocab_size
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Optional[Any] = rotary_value
_UpperCAmelCase : List[Any] = use_cache
class _UpperCAmelCase ( a ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCAmelCase : List[str] = {0: '''batch''', 1: '''sequence'''}
_UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio recording of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Tuple = tempfile.mkdtemp()
_UpperCAmelCase : int = 8
# DPR tok
_UpperCAmelCase : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_UpperCAmelCase : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : Any = os.path.join(A , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_UpperCAmelCase : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_UpperCAmelCase : List[Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCAmelCase : str = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : Union[str, Any] = os.path.join(A , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase : Tuple = os.path.join(A , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def __lowerCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __lowerCAmelCase ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __lowerCAmelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
_UpperCAmelCase : Tuple = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_UpperCAmelCase : int = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(A )
rag_tokenizer.save_pretrained(A )
_UpperCAmelCase : List[Any] = RagTokenizer.from_pretrained(A , config=A )
self.assertIsInstance(new_rag_tokenizer.question_encoder , A )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , A )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
_UpperCAmelCase : Any = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_UpperCAmelCase : Any = tokenizer(A )
self.assertIsNotNone(A )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
_UpperCAmelCase : Union[str, Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_UpperCAmelCase : List[Any] = tokenizer(A )
self.assertIsNotNone(A )
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
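# With total_limit=1, saving the second state evicts the first, which the
# assertion below verifies by counting the entries in the project directory.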
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
"""simple docstring"""
import numpy as np
def lowerCamelCase_ (UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float = 1E-1_2 , UpperCamelCase__ : int = 100 , ):
assert np.shape(UpperCamelCase__ )[0] == np.shape(UpperCamelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCamelCase__ )[0] == np.shape(UpperCamelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCamelCase__ ) == np.iscomplexobj(UpperCamelCase__ )
_UpperCAmelCase : Tuple = np.iscomplexobj(UpperCamelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCamelCase__ , input_matrix.conj().T )
# Start with convergence = False; we declare convergence once we exceed max_iterations
# or the eigenvalue estimate changes only slightly between iterations.
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : int = 0
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = 1E1_2
while not convergence:
# Multiply the matrix by the vector.
_UpperCAmelCase : Optional[Any] = np.dot(UpperCamelCase__ , UpperCamelCase__ )
# Normalize the resulting output vector.
_UpperCAmelCase : Optional[Any] = w / np.linalg.norm(UpperCamelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
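# For a unit vector v this reduces to the Rayleigh quotient lambda = v^H A v.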
_UpperCAmelCase : str = vector.conj().T if is_complex else vector.T
_UpperCAmelCase : Any = np.dot(UpperCamelCase__ , np.dot(UpperCamelCase__ , UpperCamelCase__ ) )
# Check convergence.
_UpperCAmelCase : Optional[Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Optional[int] = lambda_
if is_complex:
_UpperCAmelCase : Union[str, Any] = np.real(lambda_ )
return lambda_, vector
def lowerCamelCase_ ():
_UpperCAmelCase : int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_UpperCAmelCase : List[str] = np.array([41, 4, 20] )
_UpperCAmelCase : int = real_input_matrix.astype(np.complexaaa )
_UpperCAmelCase : Optional[Any] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCAmelCase : str = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCAmelCase : List[str] = real_input_matrix
_UpperCAmelCase : Optional[int] = real_vector
elif problem_type == "complex":
_UpperCAmelCase : str = complex_input_matrix
_UpperCAmelCase : Dict = complex_vector
# Our implementation.
_UpperCAmelCase , _UpperCAmelCase : str = power_iteration(UpperCamelCase__ , UpperCamelCase__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh is used for symmetric or Hermitian matrices).
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = np.linalg.eigh(UpperCamelCase__ )
# Last eigenvalue is the maximum one.
_UpperCAmelCase : Union[str, Any] = eigen_values[-1]
# The last column of this matrix is the eigenvector corresponding to the largest eigenvalue.
_UpperCAmelCase : List[str] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Compare element-wise absolute values of the eigenvectors,
# since eigenvectors are only unique up to sign.
assert np.linalg.norm(np.abs(UpperCamelCase__ ) - np.abs(UpperCamelCase__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
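# A minimal sketch of the lazy-import pattern used above. This is an assumed
# illustration, not the actual transformers `_LazyModule` implementation: attribute
# access on the module triggers the import of the submodule that defines the name.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr in self._name_to_module:
            # Import lazily, only on first access.
            module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")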
| 263 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def lowerCamelCase_ (UpperCamelCase__ : np.ndarray ):
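# The constants below are the ITU-R BT.601 luma coefficients for converting RGB to grayscale.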
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def lowerCamelCase_ (UpperCamelCase__ : np.ndarray ):
return (gray > 127) & (gray <= 255)
def lowerCamelCase_ (UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray ):
_UpperCAmelCase : Tuple = np.zeros_like(UpperCamelCase__ )
_UpperCAmelCase : List[str] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
_UpperCAmelCase : str = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
_UpperCAmelCase : Optional[int] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
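# An output pixel is set whenever the kernel overlaps at least one foreground pixel.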
_UpperCAmelCase : Optional[Any] = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_lowerCAmelCase :Tuple = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_lowerCAmelCase :Optional[int] = np.array(Image.open(lena_path))
# kernel to be applied
_lowerCAmelCase :List[str] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_lowerCAmelCase :Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_lowerCAmelCase :int = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 263 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, so the loop won't run 32 times;
        # it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count
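# Illustrative check (not part of the original module; the original function
# name was lost to obfuscation, `get_set_bits_count` is the name restored
# above): 13 is 0b1101, and Brian Kernighan's trick `number &= number - 1`
# clears exactly one set bit per iteration, so three iterations are needed.
#   >>> get_set_bits_count(13)
#   3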
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase :int = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =['''pixel_values''']
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = None , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
_UpperCAmelCase : str = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase : Optional[Any] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Any = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase : List[str] = get_size_dict(A )
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : Any = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : str = do_center_crop
_UpperCAmelCase : int = crop_size
_UpperCAmelCase : Union[str, Any] = do_rescale
_UpperCAmelCase : Any = rescale_factor
_UpperCAmelCase : Union[str, Any] = do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray:
_UpperCAmelCase : Optional[int] = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase : Any = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
_UpperCAmelCase : Any = get_size_dict(A )
return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A )
def __lowerCAmelCase ( self , A , A , A = None , **A ) -> np.ndarray:
return rescale(A , scale=A , data_format=A , **A )
def __lowerCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> Optional[Any]:
_UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : str = size if size is not None else self.size
_UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[str] = get_size_dict(A )
_UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : str = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase : str = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : Union[str, Any] = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
_UpperCAmelCase : Optional[int] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
_UpperCAmelCase : int = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
_UpperCAmelCase : Union[str, Any] = [self.normalize(image=A , mean=A , std=A ) for image in images]
_UpperCAmelCase : List[str] = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : Tuple = {'''pixel_values''': images}
return BatchFeature(data=A , tensor_type=A )
| 263 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE (keys are re-rooted by stripping the prefix)
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 1 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
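# Illustrative check (not part of the original script): 6 (= 1 + 2 + 3) and
# 28 (= 1 + 2 + 4 + 7 + 14) equal the sum of their proper divisors; 27 does not.
#   >>> perfect(6), perfect(28), perfect(27)
#   (True, True, False)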
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        return not self.alibi
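# Illustrative usage (not from the original file; assumes the class above is
# transformers' FalconConfig with hypothetical, non-default sizes):
#   >>> config = FalconConfig(hidden_size=128, num_attention_heads=4)
#   >>> config.head_dim
#   32
#   >>> config.rotary  # rotary embeddings are used whenever alibi is off
#   True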
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
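# Illustrative call (hypothetical numbers, not from the original module; the
# function name `casimir_force` is the one restored above): solving for the
# force between two 4 m^2 plates held 5 cm apart yields roughly 8.3e-22 N.
#   >>> casimir_force(force=0, area=4, distance=0.05)  # doctest: +SKIP
#   {'force': 8.3...e-22}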
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
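# Illustrative note (not part of the original script): the conversion only
# renames the LM head weight so `transformers` can load the checkpoint, e.g.
# a state dict {'lm_head.decoder.weight': w, ...} becomes {'lm_head.weight': w, ...}.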
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 1 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(network_file) as f:
        data = f.read().strip().split('\n')
        adjacency_matrix = [line.split(',') for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
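# Illustrative toy check (not part of the original solution): for a triangle
# with edge weights 1, 2, 3, Prim's algorithm keeps the two cheapest edges,
# so the maximum possible saving is (1 + 2 + 3) - (1 + 2) = 3.
#   >>> g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   >>> sum(g.edges.values()) - sum(g.prims_algorithm().edges.values())
#   3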
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = 'mgp-str'
    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 263 | 1 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def sieve_er(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def get_prime_numbers(n: int) -> list[int]:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends it to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def prime_factorization(number: int) -> list[int]:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
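# Illustrative doctest-style check (not part of the original primelib):
#   >>> prime_factorization(100)
#   [2, 2, 5, 5]
#   >>> prime_factorization(97)  # a prime is its own factorization
#   [97]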
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, for breaking up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
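# Illustrative check (not part of the original primelib): every even number
# greater than 2 should split into two primes, e.g.
#   >>> goldbach(28)
#   [5, 23]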
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
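# Illustrative check (not part of the original primelib): the Euclidean
# algorithm above repeatedly replaces the pair with (number2, remainder).
#   >>> gcd(24, 36)
#   12
#   >>> gcd(13, 7)  # coprime inputs
#   1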
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans
def get_divisors(n: int) -> list[int]:
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
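# Worked example (mirrors the demo in main() below): with the eight leaf
# scores [90, 23, 6, 33, 21, 65, 123, 34423] the depth-2 maximizer picks
# 90, 33, 65, 34423; the depth-1 minimizer reduces those to 33 and 65; and
# the maximizer at the root therefore reaches an optimal value of 65.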
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 1 |
"""simple docstring"""
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        message = f'Input value of [number={number}] must be an integer'
        raise TypeError(message)
    if number < 1:
        message = f'Input value of [number={number}] must be > 0'
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
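# Illustrative check (not part of the original module): the first Proth
# numbers (k * 2**n + 1 with odd k < 2**n) are 3, 5, 9, 13, 17, 25, 33, ...
#   >>> [proth(i) for i in range(1, 6)]
#   [3, 5, 9, 13, 17]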
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
try:
            value = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 1 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A , A=2 , A=5_6 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=2 , A=2 , A=7 , A="gelu_new" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=4 , A="block_sparse" , A=True , A=False , A=2 , A=3 , ) -> Tuple:
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : Union[str, Any] = use_token_type_ids
_UpperCAmelCase : Any = use_labels
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : List[Any] = type_vocab_size
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Tuple = num_choices
_UpperCAmelCase : str = rescale_embeddings
_UpperCAmelCase : int = attention_type
_UpperCAmelCase : List[str] = use_bias
_UpperCAmelCase : Optional[int] = block_size
_UpperCAmelCase : Any = num_random_blocks
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Tuple = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) -> Any:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) -> Dict:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_hidden_states_output()
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase : int = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Optional[int] = self._prepare_for_class(A , A )
_UpperCAmelCase : Tuple = model_class(A )
@jax.jit
def model_jitted(A , A=None , **A ):
return model(input_ids=A , attention_mask=A , **A )
with self.subTest('''JIT Enabled''' ):
_UpperCAmelCase : List[str] = model_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCAmelCase : Tuple = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCAmelCase ( self , A , A , A , A=1E-5 , A="outputs" , A=None ) -> Union[str, Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(A , A , A , A , A , A )
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
):
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates by theta to get the kernel x
            _x = cos_theta * px + sin_theta * py
            # and the kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel with the Gabor response at (x, y)
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow('Original', gray)
    imshow('Gabor filter with 20x20 mask and 6 directions', out)
    waitKey(0)
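# --- Illustrative usage sketch (not part of the module above) ---------------
# Sanity checks for gabor_filter_kernel; the parameter values mirror the call
# in __main__ and the expectations follow directly from the formula.
def _example_gabor_kernel():
    kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
    assert kernel.shape == (11, 11)  # an even ksize is bumped to the next odd size
    # at the center px == py == 0, so the response is exp(0) * cos(psi) == 1
    assert abs(kernel[5, 5] - 1.0) < 1e-9
    return kernel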
| 263 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
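# --- Illustrative usage sketch (not part of the tests above) ----------------
# The streaming pattern these tests exercise, in plain form: generate() blocks,
# so it runs on a worker thread while the main thread consumes the stream. The
# checkpoint and prompt are illustrative.
def _example_streaming():
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    inputs = tokenizer('Hello', return_tensors='pt')
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, 'max_new_tokens': 10, 'streamer': streamer})
    thread.start()
    text = ''.join(chunk for chunk in streamer)
    thread.join()
    return text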
| 263 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
_UpperCAmelCase : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCAmelCase : List[str] = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'''do_convert_rgb''': True,
}
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def __lowerCAmelCase ( self , **A ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , **A ) -> int:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , **A ) -> str:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_UpperCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : int = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_UpperCAmelCase : str = self.get_image_processor(do_normalize=A )
_UpperCAmelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : List[Any] = image_processor(A , return_tensors='''np''' )
_UpperCAmelCase : List[Any] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCAmelCase : List[Any] = processor(text=A )
_UpperCAmelCase : Union[str, Any] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Dict = processor.batch_decode(A )
_UpperCAmelCase : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Tuple = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
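# --- Illustrative usage sketch (not part of the tests above) ----------------
# How the processor under test is used outside the test harness; the tokenizer
# checkpoint is an assumption for illustration.
def _example_chinese_clip_processor():
    import numpy as np
    from PIL import Image
    from transformers import BertTokenizer, ChineseCLIPImageProcessor, ChineseCLIPProcessor

    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=ChineseCLIPImageProcessor())
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    # returns input_ids, token_type_ids, attention_mask and pixel_values
    return processor(text='Alexandra,T-shirt的价格是15便士。', images=image, return_tensors='pt')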
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')
    # gamma(num) = integral over [0, inf) of x^(num - 1) * e^(-x) dx
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
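# --- Illustrative usage sketch ----------------------------------------------
# Quick sanity checks against known values: gamma(1) integrates e^(-x) over
# [0, inf) and equals 1, and gamma(n) == (n - 1)! for positive integers.
def _example_gamma():
    assert abs(gamma(1.0) - 1.0) < 1e-6
    assert abs(gamma(5.0) - 24.0) < 1e-4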
| 263 | 1 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =''''''
a__ =(
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a__ =None # compression type in fsspec. ex: "gzip"
a__ =None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , A = "" , A = None , A = None , **A ) -> Optional[Any]:
super().__init__(self , **A )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_UpperCAmelCase : Optional[Any] = fsspec.open(
A , mode='''rb''' , protocol=A , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_UpperCAmelCase : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] )
_UpperCAmelCase : Any = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
_UpperCAmelCase : Any = None
@classmethod
def __lowerCAmelCase ( cls , A ) -> Tuple:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(A ).lstrip('''/''' )
def __lowerCAmelCase ( self ) -> Any:
if self.dir_cache is None:
_UpperCAmelCase : Dict = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
_UpperCAmelCase : Tuple = {f['''name''']: f}
def __lowerCAmelCase ( self , A ) -> Tuple:
return self.file.open().read()
def __lowerCAmelCase ( self , A , A = "rb" , A=None , A=True , A=None , **A , ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = self._strip_protocol(A )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''bz2'''
a__ ='''bz2'''
a__ ='''.bz2'''
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''gzip'''
a__ ='''gzip'''
a__ ='''.gz'''
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''lz4'''
a__ ='''lz4'''
a__ ='''.lz4'''
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''xz'''
a__ ='''xz'''
a__ ='''.xz'''
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''zstd'''
a__ ='''zstd'''
a__ ='''.zst'''
def __init__( self , A , A = "rb" , A = None , A = None , A = DEFAULT_BLOCK_SIZE , **A , ) -> str:
super().__init__(
fo=A , mode=A , target_protocol=A , target_options=A , block_size=A , **A , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_UpperCAmelCase : int = self.file.__enter__
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = file_
def __enter__( self ) -> List[str]:
self._file.__enter__()
return self
def __exit__( self , *A , **A ) -> Union[str, Any]:
self._file.__exit__(*A , **A )
def __iter__( self ) -> Any:
return iter(self._file )
def __lowerCAmelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self , A ) -> List[Any]:
return getattr(self._file , A )
def fixed_enter(*A , **A ):
return WrappedFile(_enter(*A , **A ) )
_UpperCAmelCase : Tuple = fixed_enter
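# --- Illustrative usage sketch (not part of the module above) ---------------
# These filesystems plug into fsspec's chained-URL syntax, matching the pattern
# noted in the `protocol` comment above (gzip://file.txt::http://foo.bar/file.txt.gz).
# This assumes the classes have been registered with fsspec, as the datasets
# library does at import time; the local path is a placeholder.
def _example_read_gzip_member(path='file.txt.gz'):
    import fsspec

    # fsspec wraps the underlying "rb" stream with a TextIOWrapper, so text
    # mode works even though the filesystem itself only opens in "rb".
    with fsspec.open(f'gzip://file.txt::file://{path}', mode='rt') as f:
        return f.read()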
| 263 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return '0b' + ''.join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
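# --- Illustrative usage sketch ----------------------------------------------
# The string-based routine agrees with Python's native bitwise OR:
def _example_binary_or():
    assert binary_or(25, 32) == '0b111001'
    assert binary_or(25, 32) == bin(25 | 32)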
| 263 | 1 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm cannot accurately
    # be applied to joblib, and it would require monkey-patching joblib internal classes,
    # which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
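# --- Illustrative sketch of the split arithmetic -----------------------------
# The contiguous start/end computation used in _map_with_multiprocessing_pool,
# isolated so it can be checked by hand: the first `mod` slices get one extra item.
def _contiguous_splits(n, num_proc):
    div, mod = n // num_proc, n % num_proc
    return [
        (div * i + min(i, mod), div * i + min(i, mod) + div + (1 if i < mod else 0))
        for i in range(num_proc)
    ]


assert _contiguous_splits(10, 3) == [(0, 4), (4, 7), (7, 10)]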
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
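# --- Illustrative note on the lazy-module pattern ----------------------------
# With sys.modules[__name__] replaced by _LazyModule, importing the package is
# cheap; the torch-backed submodule is only imported when a symbol is first
# touched. A sketch of the user-facing effect:
#
#     from transformers.models.vit_msn import ViTMSNConfig  # light: config module only
#     config = ViTMSNConfig()                               # still no torch-heavy import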
| 263 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 2_5_5 , A=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCAmelCase : List[str] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : Optional[Any] = num_channels
_UpperCAmelCase : str = min_resolution
_UpperCAmelCase : Any = max_resolution
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : Optional[int] = size
_UpperCAmelCase : List[str] = do_normalize
_UpperCAmelCase : Any = image_mean
_UpperCAmelCase : Union[str, Any] = image_std
_UpperCAmelCase : int = do_rescale
_UpperCAmelCase : str = rescale_factor
_UpperCAmelCase : str = do_pad
def __lowerCAmelCase ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self , A , A=False ) -> List[str]:
if not batched:
_UpperCAmelCase : List[Any] = image_inputs[0]
if isinstance(A , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase : str = image.size
else:
_UpperCAmelCase , _UpperCAmelCase : int = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase : List[str] = int(self.size['''shortest_edge'''] * h / w )
_UpperCAmelCase : Optional[int] = self.size['''shortest_edge''']
elif w > h:
_UpperCAmelCase : Dict = self.size['''shortest_edge''']
_UpperCAmelCase : List[Any] = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCAmelCase : Union[str, Any] = self.size['''shortest_edge''']
_UpperCAmelCase : Optional[int] = self.size['''shortest_edge''']
else:
_UpperCAmelCase : Any = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase : List[str] = max(A , key=lambda A : item[0] )[0]
_UpperCAmelCase : Optional[int] = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =YolosImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Optional[Any] = YolosImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
_UpperCAmelCase : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , A )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Tuple:
# Initialize image_processing
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(A , batched=A )
_UpperCAmelCase : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processing
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> List[str]:
        # Initialize two image processors: a default one and one with resize/rescale/normalize disabled
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : Dict = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_UpperCAmelCase : int = image_processing_a.pad(A , return_tensors='''pt''' )
_UpperCAmelCase : Optional[int] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
# prepare image and target
_UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCAmelCase : Optional[int] = json.loads(f.read() )
_UpperCAmelCase : List[str] = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
_UpperCAmelCase : Tuple = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_UpperCAmelCase : Optional[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
_UpperCAmelCase : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
_UpperCAmelCase : List[str] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
_UpperCAmelCase : Dict = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
_UpperCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
_UpperCAmelCase : Dict = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
_UpperCAmelCase : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
_UpperCAmelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
_UpperCAmelCase : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
_UpperCAmelCase : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
_UpperCAmelCase : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
# prepare image, target and masks_path
_UpperCAmelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCAmelCase : Any = json.loads(f.read() )
_UpperCAmelCase : Optional[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
_UpperCAmelCase : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCAmelCase : Dict = YolosImageProcessor(format='''coco_panoptic''' )
_UpperCAmelCase : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
_UpperCAmelCase : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
_UpperCAmelCase : List[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
_UpperCAmelCase : List[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
_UpperCAmelCase : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
_UpperCAmelCase : Dict = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
_UpperCAmelCase : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
_UpperCAmelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
_UpperCAmelCase : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
_UpperCAmelCase : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
_UpperCAmelCase : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
_UpperCAmelCase : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
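# --- Illustrative sketch of the resize rule checked above --------------------
# get_expected_values mirrors the shortest-edge resize: the smaller side is
# scaled to `shortest_edge` and the other side keeps the aspect ratio. A
# standalone sketch, not the library implementation:
def _shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _shortest_edge_resize(30, 400) == (18, 240)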
| 263 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
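# --- Illustrative sketch of the fairseq/spm alignment above ------------------
# The id conversion in code form: spm piece 0 is <unk>, which fairseq keeps at
# id 3; every other piece is shifted by `fairseq_offset` (1).
def _spm_to_fairseq_id(spm_id, fairseq_offset=1, unk_token_id=3):
    return spm_id + fairseq_offset if spm_id else unk_token_id


assert _spm_to_fairseq_id(0) == 3  # '<unk>'
assert _spm_to_fairseq_id(3) == 4  # ',' sits at 3 in spm, 4 in fairseq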
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , ) -> Optional[Any]:
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : int = 1_3
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : Dict = True
_UpperCAmelCase : int = True
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Any = True
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : List[str] = 2
_UpperCAmelCase : Tuple = 9_9
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : int = 3_2
_UpperCAmelCase : List[str] = 2
_UpperCAmelCase : List[Any] = 4
_UpperCAmelCase : Any = 0.1
_UpperCAmelCase : Any = 0.1
_UpperCAmelCase : str = 5_1_2
_UpperCAmelCase : str = 1_6
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : int = 0.02
_UpperCAmelCase : Any = 3
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : int = '''last'''
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = 0
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
_UpperCAmelCase : int = None
if self.use_input_lengths:
_UpperCAmelCase : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase : Dict = None
if self.use_token_type_ids:
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : str = None
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Dict = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> List[str]:
_UpperCAmelCase : Tuple = TFFlaubertModel(config=A )
_UpperCAmelCase : str = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
_UpperCAmelCase : int = model(A )
_UpperCAmelCase : Optional[int] = [input_ids, input_mask]
_UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
_UpperCAmelCase : Tuple = TFFlaubertWithLMHeadModel(A )
_UpperCAmelCase : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
_UpperCAmelCase : Dict = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Any:
_UpperCAmelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
_UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
_UpperCAmelCase : Tuple = TFFlaubertForSequenceClassification(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
_UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = self.num_labels
_UpperCAmelCase : Dict = TFFlaubertForTokenClassification(config=A )
_UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase : Any = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = self.num_choices
_UpperCAmelCase : Union[str, Any] = TFFlaubertForMultipleChoice(config=A )
_UpperCAmelCase : Any = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase : str = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase : Tuple = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase : Optional[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a__ =(
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ =False
a__ =False
def __lowerCAmelCase ( self , A , A , A , A , A ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Any = TFFlaubertModelTester(self )
_UpperCAmelCase : Any = ConfigTester(self , config_class=A , emb_dim=3_7 )
def __lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A )
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A )
@slow
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = TFFlaubertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Dict = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
_UpperCAmelCase : Dict = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
_UpperCAmelCase : Union[str, Any] = model(A )[0]
_UpperCAmelCase : str = tf.TensorShape((1, 8, 5_1_2) )
self.assertEqual(output.shape , A )
# compare the actual values for a slice.
_UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
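# --- Illustrative note (not part of the tests above) -------------------------
# Flaubert-style inputs carry explicit `langs` and `lengths` tensors alongside
# input_ids, mirroring the dicts built in the model tester above:
#
#     inputs = {
#         "input_ids": input_ids,   # (batch, seq_len) token ids
#         "langs": token_type_ids,  # per-token language ids (n_langs languages)
#         "lengths": input_lengths, # true sequence lengths before padding
#     }
#     outputs = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")(inputs)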
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
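# --- Illustrative sketch of the deprecation pattern --------------------------
# The wrapper above generalizes to any renamed class: subclass the replacement,
# warn on construction, and forward all arguments. A hypothetical helper, not
# a transformers API:
def deprecated_alias(new_cls, old_name):
    import warnings

    class _Deprecated(new_cls):
        def __init__(self, *args, **kwargs):
            warnings.warn(
                f'The class {old_name} is deprecated. Please use {new_cls.__name__} instead.',
                FutureWarning,
            )
            super().__init__(*args, **kwargs)

    _Deprecated.__name__ = old_name
    return _Deprecated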
| 263 | 1 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model: y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
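# --- Illustrative sketch of the round-trip the tests below verify ------------
# (paths and step counts are placeholders):
#
#     accelerator = Accelerator(project_config=ProjectConfiguration(project_dir=tmpdir))
#     model, optimizer, train_dl, valid_dl = accelerator.prepare(model, optimizer, train_dl, valid_dl)
#     accelerator.save_state(checkpoint_dir)   # snapshot model/optimizer/RNG state
#     train(2, model, train_dl, optimizer, accelerator)
#     accelerator.load_state(checkpoint_dir)   # restore the snapshot exactly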
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
            # Save 11 states (with total_limit=2 only the last two survive):
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
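# The tests above drive Accelerate's checkpointing two ways: explicit
# directories via save_state(path)/load_state(path), and automatic
# checkpoints/checkpoint_N naming plus total_limit pruning configured through
# ProjectConfiguration. A minimal sketch of the explicit-path round trip
# (toy model; the directory name is a placeholder):
import os
import tempfile

import torch
from torch import nn
from accelerate import Accelerator

model = nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as tmpdir:
    ckpt = os.path.join(tmpdir, "checkpoint")
    accelerator.save_state(ckpt)   # writes model, optimizer and RNG states
    # ... train for a few steps here ...
    accelerator.load_state(ckpt)   # restores exactly what was saved above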
| 263 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
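# Once converted, the dump folder loads like any Transformers checkpoint.
# A hedged sketch of running LUKE entity classification on the converted
# model (the folder path is a placeholder for --pytorch_dump_folder_path,
# and the character span is chosen to cover the entity mention):
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("path/to/converted", task="entity_classification")
model = LukeModel.from_pretrained("path/to/converted").eval()

text = "Ana Ivanovic could hardly believe her luck at Wimbledon."
inputs = tokenizer(text, entity_spans=[(0, 12)], return_tensors="pt")  # "Ana Ivanovic"
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)         # (1, seq_len, hidden_size)
print(outputs.entity_last_hidden_state.shape)  # (1, 1, hidden_size): one span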
| 263 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''facebook/bart-large-mnli'''
a__ =(
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
a__ ='''text_classifier'''
a__ =AutoTokenizer
a__ =AutoModelForSequenceClassification
a__ =['''text''', ['''text''']]
a__ =['''text''']
def __lowerCAmelCase ( self ) -> Any:
super().setup()
_UpperCAmelCase : int = self.model.config
_UpperCAmelCase : int = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_UpperCAmelCase : Optional[int] = int(A )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __lowerCAmelCase ( self , A , A ) -> str:
_UpperCAmelCase : Dict = labels
return self.pre_processor(
[text] * len(A ) , [f'This example is {label}' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __lowerCAmelCase ( self , A ) -> List[Any]:
_UpperCAmelCase : str = outputs.logits
_UpperCAmelCase : int = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
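# The tool classifies via the zero-shot NLI trick: each candidate label is
# rewritten as the hypothesis "This example is {label}", every
# (text, hypothesis) pair is scored by an MNLI model, and the label whose
# pair gets the highest entailment logit wins. A stripped-down sketch of that
# decision rule (model loading is left to the caller; entailment is assumed
# to sit at logit index 2, which the setup code above determines dynamically):
import torch

def zero_shot_classify(text, labels, model, tokenizer):
    # One premise/hypothesis pair per candidate label.
    inputs = tokenizer(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        logits = model(**inputs).logits
    return labels[torch.argmax(logits[:, 2]).item()]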
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
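# The matcher slides the rule tuple over the flattened parameter path and
# anchors each regex with "$", so a rule like ("mlp", "c_fc", "kernel") fires
# on any key whose path contains that subsequence. A standalone sketch of the
# same matching logic, with two checks of its window behavior:
import re

def match_rule(qs: tuple, ks: tuple) -> bool:
    pats = tuple(re.compile(q + "$") for q in qs)
    for i in range(len(ks) - len(qs) + 1):
        # Compare the rule against the window of the key path starting at i.
        if all(p.match(k) for p, k in zip(pats, ks[i:])):
            return True
    return False

assert match_rule(("mlp", "c_fc", "kernel"),
                  ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not match_rule(("mlp", "c_fc", "kernel"),
                      ("transformer", "h", "0", "attn", "c_fc", "kernel"))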
| 263 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ (UpperCamelCase__ : float ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0]
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ):
return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
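# As written above, the two helpers share one obfuscated name and the quad()
# call reuses the same placeholder for the integrand, the upper limit, and
# the extra argument, so the snippet cannot run. A working reconstruction of
# the intended Gamma(z) = integral of x**(z-1) * exp(-x) dx from 0 to infinity:
import math

from numpy import inf
from scipy.integrate import quad

def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)

def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]

if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-6  # Gamma(n) = (n-1)! for integer n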
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
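# Zero-shot audio classification works like its text counterpart: CLAP embeds
# the waveform and each candidate label into a shared space and ranks labels
# by similarity. A minimal usage sketch (same checkpoint as the slow test
# above; the input here is just a placeholder waveform of silence):
import numpy as np
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)
audio = np.zeros(48_000, dtype=np.float32)  # one second of 48 kHz audio
preds = classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"])
print(preds)  # list of {"score": ..., "label": ...}, highest score first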
| 263 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str = "cpu" , UpperCamelCase__ : Union[str, None] = None ):
_UpperCAmelCase : Tuple = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(UpperCamelCase__ , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
_UpperCAmelCase : int = v.half()
if save_path is None: # overwrite src_path
_UpperCAmelCase : str = src_path
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
fire.Fire(convert)
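# The script halves every tensor in a saved state dict and writes it back,
# overwriting the source unless save_path is given; fire exposes it as a CLI
# (note fire.Fire(convert) assumes the function is actually named `convert`).
# The same conversion done inline (paths are placeholders):
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
state_dict = {k: v.half() for k, v in state_dict.items()}  # fp32 -> fp16
torch.save(state_dict, "pytorch_model.fp16.bin")  # roughly halves disk size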
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
            # Save 11 states (with total_limit=2 only the last two survive):
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |