code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class _SCREAMING_SNAKE_CASE ( a_ ):
def __init__( self , **__A ) -> Optional[int]:
super().__init__(**__A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __A , **__A ) -> Optional[int]:
return super().__call__(__A , **__A )
def __lowerCAmelCase ( self , **__A ) -> Dict:
lowerCAmelCase_ :Optional[int] = {}
if "candidate_labels" in kwargs:
lowerCAmelCase_ :Optional[int] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowerCAmelCase_ :List[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def __lowerCAmelCase ( self , __A , __A=None , __A="This is a photo of {}." ) -> Tuple:
lowerCAmelCase_ :Dict = load_image(__A )
lowerCAmelCase_ :Optional[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase_ :List[str] = candidate_labels
lowerCAmelCase_ :str = [hypothesis_template.format(__A ) for x in candidate_labels]
lowerCAmelCase_ :Optional[Any] = self.tokenizer(__A , return_tensors=self.framework , padding=__A )
lowerCAmelCase_ :Tuple = [text_inputs]
return inputs
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Any = model_inputs.pop("""candidate_labels""" )
lowerCAmelCase_ :Dict = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __A ):
lowerCAmelCase_ :Dict = text_inputs[0]
else:
# Batching case.
lowerCAmelCase_ :Union[str, Any] = text_inputs[0][0]
lowerCAmelCase_ :Dict = self.model(**__A , **__A )
lowerCAmelCase_ :Union[str, Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :List[str] = model_outputs.pop("""candidate_labels""" )
lowerCAmelCase_ :str = model_outputs["""logits"""][0]
if self.framework == "pt":
lowerCAmelCase_ :int = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase_ :int = probs.tolist()
if not isinstance(__A , __A ):
lowerCAmelCase_ :Tuple = [scores]
elif self.framework == "tf":
lowerCAmelCase_ :Dict = stable_softmax(__A , axis=-1 )
lowerCAmelCase_ :str = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowerCAmelCase_ :Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__A , __A ) , key=lambda __A : -x[0] )
]
return result
| 84
|
class __SCREAMING_SNAKE_CASE( a_ ):
pass
class __SCREAMING_SNAKE_CASE( a_ ):
pass
class __SCREAMING_SNAKE_CASE:
def __init__( self: List[str] ) -> Union[str, Any]:
snake_case__ = [
[],
[],
[],
]
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: int ) -> None:
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(UpperCamelCase )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def lowerCAmelCase_ ( self: List[Any] ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self: Union[str, Any] ) -> str:
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class __SCREAMING_SNAKE_CASE:
def __init__( self: Union[str, Any] ) -> Any:
snake_case__ = []
def lowerCAmelCase_ ( self: str , UpperCamelCase: int ) -> None:
if len(self.queue ) == 1_00:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> int:
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
snake_case__ = min(self.queue )
self.queue.remove(UpperCamelCase )
return data
def __str__( self: Optional[Any] ) -> str:
return str(self.queue )
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_A )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_A )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_A )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_A )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 307
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( a_ , unittest.TestCase ):
__UpperCAmelCase : Optional[int] = GPTSanJapaneseTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = {'do_clean_text': False, 'add_prefix_space': False}
def lowercase_ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase__ = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
UpperCAmelCase__ = {"unk_token": "<unk>"}
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(__UpperCAmelCase ) )
def lowercase_ (self : Dict , **__UpperCAmelCase : int ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "こんにちは、世界。 \nこんばんは、㔺界。😀"
UpperCAmelCase__ = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def lowercase_ (self : List[str] , __UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
return text, ids
def lowercase_ (self : Optional[int] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ (self : List[Any] ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ (self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = "こんにちは、世界。 こんばんは、㔺界。"
UpperCAmelCase__ = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
UpperCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase__ = tokens + [tokenizer.unk_token]
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
UpperCAmelCase__ = "こんにちは、、、、世界。こんばんは、、、、世界。"
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowercase_ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase__ = "こんにちは、世界。"
UpperCAmelCase__ = "こんばんは、㔺界。😀"
UpperCAmelCase__ = "こんにちは、世界。こんばんは、世界。😀"
UpperCAmelCase__ = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase__ = tokenizer.encode("" , prefix_text=prefix_text + input_text )
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , prefix_text=__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowercase_ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase__ = "こんにちは、世界。"
UpperCAmelCase__ = "こんばんは、㔺界。😀"
UpperCAmelCase__ = len(tokenizer.encode(__UpperCAmelCase ) ) - 2
UpperCAmelCase__ = len(tokenizer.encode(__UpperCAmelCase ) ) - 2
UpperCAmelCase__ = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase__ = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase__ = tokenizer(__UpperCAmelCase , prefix_text=__UpperCAmelCase ).token_type_ids
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowercase_ (self : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase__ = tokenizer.encode("あンいワ" )
UpperCAmelCase__ = tokenizer.encode("" , prefix_text="あンいワ" )
UpperCAmelCase__ = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(__UpperCAmelCase ) , tokenizer.decode(__UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(__UpperCAmelCase ) , tokenizer.decode(__UpperCAmelCase ) )
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowercase_ (self : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase__ = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
UpperCAmelCase__ = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.batch_encode_plus(__UpperCAmelCase , padding=__UpperCAmelCase )
# fmt: off
UpperCAmelCase__ = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
UpperCAmelCase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , __UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , __UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , __UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __UpperCAmelCase )
def lowercase_ (self : Optional[Any] ) -> int:
"""simple docstring"""
pass
def lowercase_ (self : str ) -> str:
"""simple docstring"""
pass
| 65
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["image_processor", "tokenizer"]
_UpperCAmelCase = "LayoutLMv2ImageProcessor"
_UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int:
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
snake_case__ = kwargs.pop('feature_extractor' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case__ = features['words']
snake_case__ = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
# add pixel values
snake_case__ = features.pop('pixel_values' )
if return_overflowing_tokens is True:
snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] )
snake_case__ = images
return encoded_inputs
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' )
return images_with_overflow
def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]:
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCAmelCase_ ( self: str ) -> List[Any]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCAmelCase_ ( self: Any ) -> List[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , )
return self.image_processor
| 307
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case : Optional[int] = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__snake_case : Any = {
"""yjernite/retribert-base-uncased""": 512,
}
__snake_case : Optional[Any] = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class A__ ( a_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = RetriBertTokenizer
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: List[Any]="[UNK]" , _SCREAMING_SNAKE_CASE: Tuple="[SEP]" , _SCREAMING_SNAKE_CASE: List[str]="[PAD]" , _SCREAMING_SNAKE_CASE: Optional[int]="[CLS]" , _SCREAMING_SNAKE_CASE: Union[str, Any]="[MASK]" , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , _SCREAMING_SNAKE_CASE) != do_lower_case
or normalizer_state.get("strip_accents" , _SCREAMING_SNAKE_CASE) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
):
__lowerCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("type"))
__lowerCAmelCase : Union[str, Any] = do_lower_case
__lowerCAmelCase : Tuple = strip_accents
__lowerCAmelCase : Dict = tokenize_chinese_chars
__lowerCAmelCase : List[str] = normalizer_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = do_lower_case
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = [self.sep_token_id]
__lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE)
return tuple(_SCREAMING_SNAKE_CASE)
| 269
|
def a_ ( _A = 1000 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , _A ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122
|
import os
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case__ = os.path.join(os.path.dirname(_A ) , 'num.txt' )
with open(_A ) as file_hand:
return str(sum(int(_A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 307
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 71
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_UpperCAmelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def a_ ( ) -> Any:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def a_ ( ) -> Tuple:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def a_ ( ) -> str:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 307
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = """hf-internal-testing/tiny-random-t5"""
__lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = tokenizer("""This is me""" , return_tensors="""pt""" )
__lowerCAmelCase = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__lowerCAmelCase = model.generate(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__lowerCAmelCase = model_reloaded.generate(**SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def a ( self : int ) -> Optional[Any]:
__lowerCAmelCase = """hf-internal-testing/tiny-random-t5"""
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
| 229
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Any = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
__UpperCamelCase : Optional[Any] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def a_ ( _A , _A=1 , _A=256 ) -> str:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def a_ ( _A ) -> int:
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def a_ ( _A , _A ) -> int:
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def a_ ( _A , _A , _A , _A=True ) -> List[str]:
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
snake_case__ = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
snake_case__ = read_json(os.path.join(_A , 'params.json' ) )
snake_case__ = NUM_SHARDS[model_size]
snake_case__ = params['n_layers']
snake_case__ = params['n_heads']
snake_case__ = n_heads // num_shards
snake_case__ = params['dim']
snake_case__ = dim // n_heads
snake_case__ = 10000.0
snake_case__ = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
snake_case__ = params['n_kv_heads'] # for GQA / MQA
snake_case__ = n_heads_per_shard // num_key_value_heads
snake_case__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
snake_case__ = n_heads
snake_case__ = n_heads_per_shard
snake_case__ = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
snake_case__ = [
torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(_A )
]
snake_case__ = 0
snake_case__ = {'weight_map': {}}
for layer_i in range(_A ):
snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case__ = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case__ = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
snake_case__ = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
snake_case__ = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
snake_case__ = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 )
snake_case__ = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 )
snake_case__ = inv_freq
for k, v in state_dict.items():
snake_case__ = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case__ = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
snake_case__ = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
snake_case__ = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
snake_case__ = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256
snake_case__ = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def a_ ( _A , _A ) -> Tuple:
"""simple docstring"""
# Initialize the tokenizer based on the `spm` model
snake_case__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
snake_case__ = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def a_ ( ) -> str:
"""simple docstring"""
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
snake_case__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
snake_case__ = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
| 307
| 0
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _SCREAMING_SNAKE_CASE( a_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 42
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.floataa
SCREAMING_SNAKE_CASE_ : List[Any] = True
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
super().setup()
__SCREAMING_SNAKE_CASE :Union[str, Any] = nn.Dense(5 ,dtype=self.dtype )
def __call__( self ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = super().__call__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class _SCREAMING_SNAKE_CASE( a_ ):
SCREAMING_SNAKE_CASE_ : Any = FlaxBigBirdForNaturalQuestionsModule
def __lowerCamelCase ( a_ : Optional[Any] , a_ : int , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Any , a_ : Optional[Any] ) -> List[str]:
def cross_entropy(a_ : Dict , a_ : Tuple , a_ : Optional[int]=None ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = logits.shape[-1]
__SCREAMING_SNAKE_CASE :str = (labels[..., None] == jnp.arange(_A )[None]).astype('''f4''' )
__SCREAMING_SNAKE_CASE :Tuple = jax.nn.log_softmax(_A , axis=-1 )
__SCREAMING_SNAKE_CASE :Optional[int] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__SCREAMING_SNAKE_CASE :Optional[Any] = reduction(_A )
return loss
__SCREAMING_SNAKE_CASE :List[str] = partial(_A , reduction=jnp.mean )
__SCREAMING_SNAKE_CASE :Union[str, Any] = cross_entropy(_A , _A )
__SCREAMING_SNAKE_CASE :Tuple = cross_entropy(_A , _A )
__SCREAMING_SNAKE_CASE :Any = cross_entropy(_A , _A )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : Optional[int] = '''google/bigbird-roberta-base'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3000
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10500
SCREAMING_SNAKE_CASE_ : str = 128
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : Any = 5
# tx_args
SCREAMING_SNAKE_CASE_ : Dict = 3E-5
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.0
SCREAMING_SNAKE_CASE_ : Dict = 20000
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.0095
SCREAMING_SNAKE_CASE_ : Tuple = '''bigbird-roberta-natural-questions'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''training-expt'''
SCREAMING_SNAKE_CASE_ : List[Any] = '''data/nq-training.jsonl'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''data/nq-validation.jsonl'''
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
os.makedirs(self.base_dir ,exist_ok=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = os.path.join(self.base_dir ,self.save_dir )
__SCREAMING_SNAKE_CASE :Tuple = self.batch_size_per_device * jax.device_count()
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : Tuple = 42
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4096 # no dynamic padding on TPUs
def __call__( self ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.collate_fn(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return batch
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = self.fetch_inputs(features['''input_ids'''] )
__SCREAMING_SNAKE_CASE :Tuple = {
'''input_ids''': jnp.array(SCREAMING_SNAKE_CASE__ ,dtype=jnp.intaa ),
'''attention_mask''': jnp.array(SCREAMING_SNAKE_CASE__ ,dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] ,dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] ,dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] ,dtype=jnp.intaa ),
}
return batch
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = [self._fetch_inputs(SCREAMING_SNAKE_CASE__ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = [1 for _ in range(len(SCREAMING_SNAKE_CASE__ ) )]
while len(SCREAMING_SNAKE_CASE__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : str , a_ : str=None ) -> Union[str, Any]:
if seed is not None:
__SCREAMING_SNAKE_CASE :int = dataset.shuffle(seed=_A )
for i in range(len(_A ) // batch_size ):
__SCREAMING_SNAKE_CASE :List[Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_A )
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : List[str] , **a_ : List[Any] ) -> int:
def loss_fn(a_ : Dict ):
__SCREAMING_SNAKE_CASE :int = model_inputs.pop('''start_labels''' )
__SCREAMING_SNAKE_CASE :Any = model_inputs.pop('''end_labels''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = model_inputs.pop('''pooled_labels''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = state.apply_fn(**_A , params=_A , dropout_rng=_A , train=_A )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = outputs
return state.loss_fn(
_A , _A , _A , _A , _A , _A , )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = jax.random.split(_A )
__SCREAMING_SNAKE_CASE :List[Any] = jax.value_and_grad(_A )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = grad_fn(state.params )
__SCREAMING_SNAKE_CASE :int = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
__SCREAMING_SNAKE_CASE :int = jax.lax.pmean(_A , '''batch''' )
__SCREAMING_SNAKE_CASE :Dict = state.apply_gradients(grads=_A )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCamelCase ( a_ : Dict , **a_ : int ) -> Any:
__SCREAMING_SNAKE_CASE :str = model_inputs.pop('''start_labels''' )
__SCREAMING_SNAKE_CASE :int = model_inputs.pop('''end_labels''' )
__SCREAMING_SNAKE_CASE :List[Any] = model_inputs.pop('''pooled_labels''' )
__SCREAMING_SNAKE_CASE :Optional[int] = state.apply_fn(**_A , params=state.params , train=_A )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = outputs
__SCREAMING_SNAKE_CASE :Optional[int] = state.loss_fn(_A , _A , _A , _A , _A , _A )
__SCREAMING_SNAKE_CASE :Union[str, Any] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class _SCREAMING_SNAKE_CASE( train_state.TrainState ):
SCREAMING_SNAKE_CASE_ : Optional[int] = struct.field(pytree_node=a_ )
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : Dict = 42
SCREAMING_SNAKE_CASE_ : List[Any] = 42
SCREAMING_SNAKE_CASE_ : Tuple = 42
SCREAMING_SNAKE_CASE_ : List[str] = 42
SCREAMING_SNAKE_CASE_ : Dict = 42
SCREAMING_SNAKE_CASE_ : Optional[int] = 42
SCREAMING_SNAKE_CASE_ : int = None
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = model.params
__SCREAMING_SNAKE_CASE :Union[str, Any] = TrainState.create(
apply_fn=model.__call__ ,params=SCREAMING_SNAKE_CASE__ ,tx=SCREAMING_SNAKE_CASE__ ,loss_fn=SCREAMING_SNAKE_CASE__ ,)
if ckpt_dir is not None:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = restore_checkpoint(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = build_tx(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = train_state.TrainState(
step=SCREAMING_SNAKE_CASE__ ,apply_fn=model.__call__ ,params=SCREAMING_SNAKE_CASE__ ,tx=SCREAMING_SNAKE_CASE__ ,opt_state=SCREAMING_SNAKE_CASE__ ,)
__SCREAMING_SNAKE_CASE :str = args
__SCREAMING_SNAKE_CASE :str = data_collator
__SCREAMING_SNAKE_CASE :Union[str, Any] = lr
__SCREAMING_SNAKE_CASE :List[Any] = params
__SCREAMING_SNAKE_CASE :Optional[Any] = jax_utils.replicate(SCREAMING_SNAKE_CASE__ )
return state
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.args
__SCREAMING_SNAKE_CASE :Optional[int] = len(SCREAMING_SNAKE_CASE__ ) // args.batch_size
__SCREAMING_SNAKE_CASE :str = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE__ ,jax.device_count() )
for epoch in range(args.max_epochs ):
__SCREAMING_SNAKE_CASE :int = jnp.array(0 ,dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE :Any = get_batched_dataset(SCREAMING_SNAKE_CASE__ ,args.batch_size ,seed=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ ,total=SCREAMING_SNAKE_CASE__ ,desc=f'''Running EPOCH-{epoch}''' ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.data_collator(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = self.train_step_fn(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__SCREAMING_SNAKE_CASE :Optional[int] = jax_utils.unreplicate(state.step )
__SCREAMING_SNAKE_CASE :List[str] = running_loss.item() / i
__SCREAMING_SNAKE_CASE :Tuple = self.scheduler_fn(state_step - 1 )
__SCREAMING_SNAKE_CASE :List[Any] = self.evaluate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE__ ) )
self.logger.log(SCREAMING_SNAKE_CASE__ ,commit=SCREAMING_SNAKE_CASE__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' ,state=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = get_batched_dataset(SCREAMING_SNAKE_CASE__ ,self.args.batch_size )
__SCREAMING_SNAKE_CASE :Tuple = len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size
__SCREAMING_SNAKE_CASE :Tuple = jnp.array(0 ,dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE :Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ ,total=SCREAMING_SNAKE_CASE__ ,desc='''Evaluating ... ''' ):
__SCREAMING_SNAKE_CASE :str = self.data_collator(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = self.val_step_fn(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ )
print(f'''SAVING CHECKPOINT IN {save_dir}''' ,end=''' ... ''' )
self.model_save_fn(SCREAMING_SNAKE_CASE__ ,params=state.params )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''opt_state.msgpack''' ) ,'''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args ,os.path.join(SCREAMING_SNAKE_CASE__ ,'''args.joblib''' ) )
joblib.dump(self.data_collator ,os.path.join(SCREAMING_SNAKE_CASE__ ,'''data_collator.joblib''' ) )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''training_state.json''' ) ,'''w''' ) as f:
json.dump({'''step''': state.step.item()} ,SCREAMING_SNAKE_CASE__ )
print('''DONE''' )
def __lowerCamelCase ( a_ : Tuple , a_ : Tuple ) -> Any:
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' )
with open(os.path.join(_A , '''flax_model.msgpack''' ) , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :List[Any] = from_bytes(state.params , f.read() )
with open(os.path.join(_A , '''opt_state.msgpack''' ) , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :Any = from_bytes(state.opt_state , f.read() )
__SCREAMING_SNAKE_CASE :List[str] = joblib.load(os.path.join(_A , '''args.joblib''' ) )
__SCREAMING_SNAKE_CASE :List[str] = joblib.load(os.path.join(_A , '''data_collator.joblib''' ) )
with open(os.path.join(_A , '''training_state.json''' ) , '''r''' ) as f:
__SCREAMING_SNAKE_CASE :List[Any] = json.load(_A )
__SCREAMING_SNAKE_CASE :int = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int] , a_ : str , a_ : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE :str = num_train_steps - warmup_steps
__SCREAMING_SNAKE_CASE :Any = optax.linear_schedule(init_value=_A , end_value=_A , transition_steps=_A )
__SCREAMING_SNAKE_CASE :int = optax.linear_schedule(init_value=_A , end_value=1e-7 , transition_steps=_A )
__SCREAMING_SNAKE_CASE :int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __lowerCamelCase ( a_ : List[Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Dict , a_ : Any ) -> List[Any]:
def weight_decay_mask(a_ : int ):
__SCREAMING_SNAKE_CASE :str = traverse_util.flatten_dict(_A )
__SCREAMING_SNAKE_CASE :Union[str, Any] = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(_A )
__SCREAMING_SNAKE_CASE :Any = scheduler_fn(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE :Tuple = optax.adamw(learning_rate=_A , weight_decay=_A , mask=_A )
return tx, lr
| 191
|
import os
import string
import sys
__UpperCamelCase : List[Any] = 1 << 8
__UpperCamelCase : Union[str, Any] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__UpperCamelCase : Optional[Any] = KEYMAP["""up"""]
__UpperCamelCase : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
__UpperCamelCase : List[Any] = []
__UpperCamelCase : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__UpperCamelCase : List[str] = ord(str(i))
def a_ ( ) -> Optional[int]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
snake_case__ = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
snake_case__ = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case__ = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case__ = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case__ = chr(KEYMAP['esc'] )
except KeyError:
snake_case__ = cha[1]
else:
snake_case__ = ch.decode(_A )
else:
snake_case__ = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case__ = sys.stdin.fileno()
snake_case__ = termios.tcgetattr(_A )
try:
tty.setraw(_A )
snake_case__ = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
snake_case__ = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
snake_case__ = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 307
| 0
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
snake_case_ : int = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def A__ ( UpperCAmelCase_ , UpperCAmelCase_=None ):
require_version(deps[pkg] , _A )
| 83
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "gptsan-japanese"
_UpperCAmelCase = [
"past_key_values",
]
_UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]:
snake_case__ = vocab_size
snake_case__ = max_position_embeddings
snake_case__ = d_model
snake_case__ = d_ff
snake_case__ = d_ext
snake_case__ = d_spout
snake_case__ = num_switch_layers
snake_case__ = num_ext_layers
snake_case__ = num_switch_layers + num_ext_layers
snake_case__ = num_heads
snake_case__ = num_experts
snake_case__ = expert_capacity
snake_case__ = dropout_rate
snake_case__ = layer_norm_epsilon
snake_case__ = router_bias
snake_case__ = router_jitter_noise
snake_case__ = router_dtype
snake_case__ = router_ignore_padding_tokens
snake_case__ = output_hidden_states
snake_case__ = output_attentions
snake_case__ = initializer_factor
snake_case__ = output_router_logits
snake_case__ = use_cache
super().__init__(
separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
| 307
| 0
|
def lowerCamelCase_ ( ):
'''simple docstring'''
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(_A , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"{solution() = }")
| 345
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__UpperCamelCase : int = 299792458
# Symbols
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = symbols("""ct x y z""")
def a_ ( _A ) -> float:
"""simple docstring"""
if velocity > c:
raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('Speed must be greater than or equal to 1!' )
return velocity / c
def a_ ( _A ) -> float:
"""simple docstring"""
return 1 / sqrt(1 - beta(_A ) ** 2 )
def a_ ( _A ) -> np.ndarray:
"""simple docstring"""
return np.array(
[
[gamma(_A ), -gamma(_A ) * beta(_A ), 0, 0],
[-gamma(_A ) * beta(_A ), gamma(_A ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def a_ ( _A , _A = None ) -> np.ndarray:
"""simple docstring"""
# Ensure event is not empty
if event is None:
snake_case__ = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_A ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__UpperCamelCase : List[Any] = transform(29979245)
print("""Example of four vector: """)
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
__UpperCamelCase : List[Any] = {ct: c, x: 1, y: 1, z: 1}
__UpperCamelCase : Tuple = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 307
| 0
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class __A ( a_ ):
"""simple docstring"""
__lowerCAmelCase = "align_text_model"
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-1_2 , __A=0 , __A="absolute" , __A=True , **__A , ) -> List[str]:
super().__init__(**__A )
a =vocab_size
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =hidden_act
a =intermediate_size
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =type_vocab_size
a =initializer_range
a =layer_norm_eps
a =position_embedding_type
a =use_cache
a =pad_token_id
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
a , a =cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
a =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class __A ( a_ ):
"""simple docstring"""
__lowerCAmelCase = "align_vision_model"
def __init__( self , __A = 3 , __A = 600 , __A = 2.0 , __A = 3.1 , __A = 8 , __A = [3, 3, 5, 3, 5, 5, 3] , __A = [32, 16, 24, 40, 80, 112, 192] , __A = [16, 24, 40, 80, 112, 192, 320] , __A = [] , __A = [1, 2, 2, 2, 1, 2, 1] , __A = [1, 2, 2, 3, 3, 4, 1] , __A = [1, 6, 6, 6, 6, 6, 6] , __A = 0.25 , __A = "swish" , __A = 2560 , __A = "mean" , __A = 0.02 , __A = 0.001 , __A = 0.99 , __A = 0.2 , **__A , ) -> Optional[Any]:
super().__init__(**__A )
a =num_channels
a =image_size
a =width_coefficient
a =depth_coefficient
a =depth_divisor
a =kernel_sizes
a =in_channels
a =out_channels
a =depthwise_padding
a =strides
a =num_block_repeats
a =expand_ratios
a =squeeze_expansion_ratio
a =hidden_act
a =hidden_dim
a =pooling_type
a =initializer_range
a =batch_norm_eps
a =batch_norm_momentum
a =drop_connect_rate
a =sum(__A ) * 4
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
a , a =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
a =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class __A ( a_ ):
"""simple docstring"""
__lowerCAmelCase = "align"
__lowerCAmelCase = True
def __init__( self , __A=None , __A=None , __A=640 , __A=1.0 , __A=0.02 , **__A , ) -> Any:
super().__init__(**__A )
if text_config is None:
a ={}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
a ={}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
a =AlignTextConfig(**__A )
a =AlignVisionConfig(**__A )
a =projection_dim
a =temperature_init_value
a =initializer_range
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A , __A , **__A ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =copy.deepcopy(self.__dict__ )
a =self.text_config.to_dict()
a =self.vision_config.to_dict()
a =self.__class__.model_type
return output
| 81
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCamelCase : Any = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _A :
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Any):
a : List[str] = data
a : Optional[int] = None
class _A :
"""simple docstring"""
def __init__( self : Union[str, Any]):
a : List[Any] = None
a : List[str] = None
def __iter__( self : List[Any]):
a : List[str] = self.head
while self.head:
yield node.data
a : int = node.next
if node == self.head:
break
def __len__( self : Optional[int]):
return sum(1 for _ in self)
def __repr__( self : Tuple):
return "->".join(str(__UpperCAmelCase) for item in iter(self))
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any):
self.insert_nth(len(self) , __UpperCAmelCase)
def __snake_case ( self : Any , __UpperCAmelCase : Any):
self.insert_nth(0 , __UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any):
if index < 0 or index > len(self):
raise IndexError("list index out of range.")
a : Any = Node(__UpperCAmelCase)
if self.head is None:
a : Optional[Any] = new_node # first node points itself
a : Optional[int] = new_node
elif index == 0: # insert at head
a : Dict = self.head
a : List[Any] = new_node
else:
a : str = self.head
for _ in range(index - 1):
a : Tuple = temp.next
a : Any = temp.next
a : List[Any] = new_node
if index == len(self) - 1: # insert at tail
a : Optional[Any] = new_node
def __snake_case ( self : str):
return self.delete_nth(0)
def __snake_case ( self : Dict):
return self.delete_nth(len(self) - 1)
def __snake_case ( self : Dict , __UpperCAmelCase : int = 0):
if not 0 <= index < len(self):
raise IndexError("list index out of range.")
a : List[Any] = self.head
if self.head == self.tail: # just one node
a : List[str] = None
elif index == 0: # delete head node
a : Optional[Any] = self.tail.next.next
a : Any = self.head.next
else:
a : List[str] = self.head
for _ in range(index - 1):
a : Union[str, Any] = temp.next
a : Optional[Any] = temp.next
a : Union[str, Any] = temp.next.next
if index == len(self) - 1: # delete at tail
a : Any = temp
return delete_node.data
def __snake_case ( self : Optional[Any]):
return len(self) == 0
def lowercase ( )-> None:
'''simple docstring'''
a : Tuple = CircularLinkedList()
assert len(_A ) == 0
assert circular_linked_list.is_empty() is True
assert str(_A ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(_A ) == i
circular_linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {"""vocab_file""": """spiece.model"""}
__UpperCamelCase : Any = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Tuple = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
__UpperCamelCase : Optional[Any] = """▁"""
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
snake_case__ = legacy
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , )
snake_case__ = vocab_file
snake_case__ = extra_ids
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , )
return max_model_length
@property
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase )) + [1]
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()]
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]:
if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
snake_case__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
snake_case__ = self._add_eos_if_not_present(UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
snake_case__ = self._add_eos_if_not_present(UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self: Union[str, Any] ) -> List[str]:
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]:
snake_case__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' )
return super().tokenize(UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str:
if not self.legacy:
snake_case__ = text.startswith(UpperCamelCase )
if is_first:
snake_case__ = text[1:]
snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ):
snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict:
if token.startswith('<extra_id_' ):
snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase )
snake_case__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCamelCase )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple:
if index < self.sp_model.get_piece_size():
snake_case__ = self.sp_model.IdToPiece(UpperCamelCase )
else:
snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict:
snake_case__ = []
snake_case__ = ''
snake_case__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase ) + token
snake_case__ = True
snake_case__ = []
else:
current_sub_tokens.append(UpperCamelCase )
snake_case__ = False
out_string += self.sp_model.decode(UpperCamelCase )
return out_string.strip()
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , 'wb' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
| 307
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( a_ ):
UpperCAmelCase_ :Optional[Any] = "unispeech"
def __init__( self , __A=32 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.1 , __A=0.1 , __A=0.0_2 , __A=1E-5 , __A="group" , __A="gelu" , __A=(512, 512, 512, 512, 512, 512, 512) , __A=(5, 2, 2, 2, 2, 2, 2) , __A=(10, 3, 3, 3, 3, 2, 2) , __A=False , __A=128 , __A=16 , __A=False , __A=True , __A=0.0_5 , __A=10 , __A=2 , __A=0.0 , __A=10 , __A=0 , __A=320 , __A=2 , __A=0.1 , __A=100 , __A=256 , __A=256 , __A=0.1 , __A="mean" , __A=False , __A=False , __A=256 , __A=80 , __A=0 , __A=1 , __A=2 , __A=0.5 , **__A , ) -> Tuple:
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
lowerCAmelCase_ :Optional[Any] = hidden_size
lowerCAmelCase_ :List[Any] = feat_extract_norm
lowerCAmelCase_ :Optional[Any] = feat_extract_activation
lowerCAmelCase_ :Tuple = list(__A )
lowerCAmelCase_ :Union[str, Any] = list(__A )
lowerCAmelCase_ :List[str] = list(__A )
lowerCAmelCase_ :int = conv_bias
lowerCAmelCase_ :Tuple = num_conv_pos_embeddings
lowerCAmelCase_ :Dict = num_conv_pos_embedding_groups
lowerCAmelCase_ :int = len(self.conv_dim )
lowerCAmelCase_ :int = num_hidden_layers
lowerCAmelCase_ :List[Any] = intermediate_size
lowerCAmelCase_ :Tuple = hidden_act
lowerCAmelCase_ :Any = num_attention_heads
lowerCAmelCase_ :Optional[Any] = hidden_dropout
lowerCAmelCase_ :Union[str, Any] = attention_dropout
lowerCAmelCase_ :Any = activation_dropout
lowerCAmelCase_ :Optional[Any] = feat_proj_dropout
lowerCAmelCase_ :Optional[Any] = final_dropout
lowerCAmelCase_ :List[str] = layerdrop
lowerCAmelCase_ :Union[str, Any] = layer_norm_eps
lowerCAmelCase_ :Union[str, Any] = initializer_range
lowerCAmelCase_ :Tuple = num_ctc_classes
lowerCAmelCase_ :Tuple = vocab_size
lowerCAmelCase_ :List[str] = do_stable_layer_norm
lowerCAmelCase_ :List[Any] = use_weighted_layer_sum
lowerCAmelCase_ :Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ :Optional[Any] = apply_spec_augment
lowerCAmelCase_ :str = mask_time_prob
lowerCAmelCase_ :Optional[Any] = mask_time_length
lowerCAmelCase_ :Union[str, Any] = mask_time_min_masks
lowerCAmelCase_ :List[Any] = mask_feature_prob
lowerCAmelCase_ :Optional[Any] = mask_feature_length
lowerCAmelCase_ :Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase_ :List[str] = num_codevectors_per_group
lowerCAmelCase_ :Dict = num_codevector_groups
lowerCAmelCase_ :Optional[Any] = contrastive_logits_temperature
lowerCAmelCase_ :Union[str, Any] = feat_quantizer_dropout
lowerCAmelCase_ :str = num_negatives
lowerCAmelCase_ :Union[str, Any] = codevector_dim
lowerCAmelCase_ :int = proj_codevector_dim
lowerCAmelCase_ :int = diversity_loss_weight
# ctc loss
lowerCAmelCase_ :Union[str, Any] = ctc_loss_reduction
lowerCAmelCase_ :Tuple = ctc_zero_infinity
# pretraining loss
lowerCAmelCase_ :str = replace_prob
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 84
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __SCREAMING_SNAKE_CASE:
def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict:
snake_case__ = LlamaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
snake_case__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str:
snake_case__ = True
snake_case__ = LlamaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any:
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]:
snake_case__ = True
snake_case__ = True
snake_case__ = LlamaForCausalLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# first forward pass
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , )
snake_case__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
snake_case__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0]
# select random slice
snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case__ = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) = config_and_inputs
snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ):
_UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = LlamaModelTester(self )
snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: int ) -> int:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
snake_case__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'single_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self: Dict ) -> int:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = 'multi_label_classification'
snake_case__ = input_dict['input_ids']
snake_case__ = input_ids.ne(1 ).to(UpperCamelCase )
snake_case__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ = LlamaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self: Dict ) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ids_tensor([1, 10] , config.vocab_size )
snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = LlamaModel(UpperCamelCase )
original_model.to(UpperCamelCase )
original_model.eval()
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
snake_case__ = original_model(UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ = {'type': scaling_type, 'factor': 10.0}
snake_case__ = LlamaModel(UpperCamelCase )
scaled_model.to(UpperCamelCase )
scaled_model.eval()
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self: int ) -> List[Any]:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
# Expected mean on dim = -1
snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
snake_case__ = model(torch.tensor(UpperCamelCase ) )
snake_case__ = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Model is curently gated' )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
snake_case__ = 'Simply put, the theory of relativity states that '
snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' )
snake_case__ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase )
# greedy generation outputs
snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase )
snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 307
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2
def lowerCAmelCase_ ( __A, __A = 16 ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase__ = load_dataset("glue", "mrpc" )
def tokenize_function(__A ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ = tokenizer(examples["sentence1"], examples["sentence2"], truncation=_A, max_length=_A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ = datasets.map(
_A, batched=_A, remove_columns=["idx", "sentence1", "sentence2"], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(__A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ = 8
else:
UpperCAmelCase__ = None
return tokenizer.pad(
_A, padding="longest", max_length=_A, pad_to_multiple_of=_A, return_tensors="pt", )
# Instantiate dataloaders.
UpperCAmelCase__ = DataLoader(
tokenized_datasets["train"], shuffle=_A, collate_fn=_A, batch_size=_A )
UpperCAmelCase__ = DataLoader(
tokenized_datasets["validation"], shuffle=_A, collate_fn=_A, batch_size=_A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase__ = mocked_dataloaders # noqa: F811
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS", _A ) == "1":
UpperCAmelCase__ = 2
# Initialize accelerator
UpperCAmelCase__ = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ = config["lr"]
UpperCAmelCase__ = int(config["num_epochs"] )
UpperCAmelCase__ = int(config["seed"] )
UpperCAmelCase__ = int(config["batch_size"] )
UpperCAmelCase__ = evaluate.load("glue", "mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase__ = MAX_GPU_BATCH_SIZE
set_seed(_A )
UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(_A, _A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=_A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ = AdamW(params=model.parameters(), lr=_A )
# Instantiate scheduler
UpperCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=_A, num_warmup_steps=100, num_training_steps=(len(_A ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare(
_A, _A, _A, _A, _A )
# Now we train the model
for epoch in range(_A ):
model.train()
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ = model(**_A )
UpperCAmelCase__ = outputs.loss
UpperCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(_A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
UpperCAmelCase__ = 0
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ = model(**_A )
UpperCAmelCase__ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_A ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_A, references=_A, )
UpperCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", _A )
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision", type=_A, default=_A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_A, _A )
if __name__ == "__main__":
main()
| 65
|
from math import isclose, sqrt
def a_ ( _A , _A , _A ) -> tuple[float, float, float]:
"""simple docstring"""
snake_case__ = point_y / 4 / point_x
snake_case__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
snake_case__ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
snake_case__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
snake_case__ = outgoing_gradient**2 + 4
snake_case__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
snake_case__ = (point_y - outgoing_gradient * point_x) ** 2 - 100
snake_case__ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
snake_case__ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
snake_case__ = x_minus if isclose(_A , _A ) else x_plus
snake_case__ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a_ ( _A = 1.4 , _A = -9.6 ) -> int:
"""simple docstring"""
snake_case__ = 0
snake_case__ = first_x_coord
snake_case__ = first_y_coord
snake_case__ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
snake_case__ , snake_case__ , snake_case__ = next_point(_A , _A , _A )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 307
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Dict = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
__snake_case : Optional[int] = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
__snake_case : List[str] = {
"""abeja/gpt-neox-japanese-2.7b""": 2_048,
}
def _lowercase ( __snake_case ,__snake_case ) -> Optional[int]:
with open(_A ,"r" ,encoding="utf-8" ) as f:
__lowerCAmelCase : str = json.loads(f.read() )
__lowerCAmelCase : Any = collections.OrderedDict()
__lowerCAmelCase : Optional[Any] = collections.OrderedDict()
__lowerCAmelCase : Optional[int] = collections.OrderedDict()
with open(_A ,"r" ,encoding="utf-8" ) as f:
__lowerCAmelCase : List[str] = f.readlines()
__lowerCAmelCase : int = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(_A ):
__lowerCAmelCase : Dict = b
__lowerCAmelCase : Tuple = idx
for wd in b:
__lowerCAmelCase : Optional[int] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A__ ( a_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]="<|endoftext|>" , _SCREAMING_SNAKE_CASE: Optional[Any]="<|endoftext|>" , _SCREAMING_SNAKE_CASE: str="<|startoftext|>" , _SCREAMING_SNAKE_CASE: int="<|endoftext|>" , _SCREAMING_SNAKE_CASE: str=False , **_SCREAMING_SNAKE_CASE: Dict , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , do_clean_text=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if not os.path.isfile(_SCREAMING_SNAKE_CASE):
raise ValueError(
F"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(_SCREAMING_SNAKE_CASE):
raise ValueError(
F"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
__lowerCAmelCase : Tuple = do_clean_text
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = load_vocab_and_emoji(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(_SCREAMING_SNAKE_CASE , clean=self.do_clean_text)
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Tuple:
"""simple docstring"""
return self.vocab.get(_SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str]) -> int:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: List[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = "".join(_SCREAMING_SNAKE_CASE).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: "Conversation") -> List[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE) + [self.eos_token_id])
if len(_SCREAMING_SNAKE_CASE) > self.model_max_length:
__lowerCAmelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = 0
if os.path.isdir(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase : List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
__lowerCAmelCase : List[str] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
__lowerCAmelCase : List[str] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
__lowerCAmelCase : Tuple = token_index
writer.write(",".join(_SCREAMING_SNAKE_CASE) + "\n")
index += 1
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , _SCREAMING_SNAKE_CASE)
return vocab_file, emoji_file
class A__ ( a_ ):
'''simple docstring'''
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = vocab # same as swe
__lowerCAmelCase : Tuple = ids_to_tokens # same as bpe
__lowerCAmelCase : List[str] = emoji
__lowerCAmelCase : str = np.max([len(_SCREAMING_SNAKE_CASE) for w in self.vocab.keys()])
__lowerCAmelCase : List[Any] = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
__lowerCAmelCase : Optional[Any] = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
__lowerCAmelCase : Union[str, Any] = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
__lowerCAmelCase : Dict = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
__lowerCAmelCase : Any = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
__lowerCAmelCase : Union[str, Any] = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
__lowerCAmelCase : Optional[Any] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
__lowerCAmelCase : Dict = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
__lowerCAmelCase : Union[str, Any] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Tuple) -> int:
"""simple docstring"""
return len(self.ids_to_tokens)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.content_repattera.sub("<URL>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.content_repattera.sub("<EMAIL>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.content_repattera.sub("<TEL>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.content_repattera.sub("<DATE>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self.content_repattera.sub("<DATE>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.content_repattera.sub("<PRICE>" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
__lowerCAmelCase : Optional[int] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]=False) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = text.replace(" " , "<SP>")
__lowerCAmelCase : Dict = text.replace(" " , "<SP>")
__lowerCAmelCase : Dict = text.replace("\r\n" , "<BR>")
__lowerCAmelCase : Tuple = text.replace("\n" , "<BR>")
__lowerCAmelCase : Tuple = text.replace("\r" , "<BR>")
__lowerCAmelCase : Dict = text.replace("\t" , "<TAB>")
__lowerCAmelCase : Optional[int] = text.replace("—" , "ー")
__lowerCAmelCase : List[str] = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
__lowerCAmelCase : int = text.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if clean:
__lowerCAmelCase : List[str] = self.clean_text(_SCREAMING_SNAKE_CASE)
def check_simbol(_SCREAMING_SNAKE_CASE: List[str]):
__lowerCAmelCase : int = x.encode()
if len(_SCREAMING_SNAKE_CASE) == 1 and len(_SCREAMING_SNAKE_CASE) == 2:
__lowerCAmelCase : Dict = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(_SCREAMING_SNAKE_CASE: List[str]):
__lowerCAmelCase : Tuple = x.encode()
if len(_SCREAMING_SNAKE_CASE) == 1 and len(_SCREAMING_SNAKE_CASE) == 3:
__lowerCAmelCase : str = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
__lowerCAmelCase : int = 0
__lowerCAmelCase : Tuple = []
while pos < len(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Dict = min(len(_SCREAMING_SNAKE_CASE) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
__lowerCAmelCase : str = [] # (token_id, token, pos)
for e in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1):
__lowerCAmelCase : int = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_SCREAMING_SNAKE_CASE) > 2:
__lowerCAmelCase : Optional[int] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(_SCREAMING_SNAKE_CASE) > 0:
# the smallest token_id is adopted
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE: x[0])[0]
result.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = e
else:
__lowerCAmelCase : Any = pos + 1
__lowerCAmelCase : List[str] = text[pos:end]
if check_simbol(_SCREAMING_SNAKE_CASE):
result.append("<KIGOU>")
elif checkuae(_SCREAMING_SNAKE_CASE):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
__lowerCAmelCase : Tuple = end
return result
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str]="\n") -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = []
__lowerCAmelCase : Dict = []
__lowerCAmelCase : int = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(_SCREAMING_SNAKE_CASE) > 0:
words.append(bytearray(_SCREAMING_SNAKE_CASE).decode("utf-8" , errors="replace"))
__lowerCAmelCase : List[str] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(_SCREAMING_SNAKE_CASE)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(_SCREAMING_SNAKE_CASE)
if len(_SCREAMING_SNAKE_CASE) > 0:
words.append(bytearray(_SCREAMING_SNAKE_CASE).decode("utf-8" , errors="replace"))
__lowerCAmelCase : List[Any] = "".join(_SCREAMING_SNAKE_CASE)
return text
| 269
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __SCREAMING_SNAKE_CASE( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self: Any , UpperCamelCase: Optional[int]=None , **UpperCamelCase: Union[str, Any] ) -> int:
super().__init__(features=UpperCamelCase )
snake_case__ = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Any ) -> List[str]:
import torch
if isinstance(UpperCamelCase , UpperCamelCase ) and column:
if all(
isinstance(UpperCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase )
return column
def lowerCAmelCase_ ( self: str , UpperCamelCase: Dict ) -> Union[str, Any]:
import torch
if isinstance(UpperCamelCase , (str, bytes, type(UpperCamelCase )) ):
return value
elif isinstance(UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ = {}
if isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
snake_case__ = {'dtype': torch.intaa}
elif isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase , PIL.Image.Image ):
snake_case__ = np.asarray(UpperCamelCase )
return torch.tensor(UpperCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: str ) -> Any:
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase , '__array__' ) and not isinstance(UpperCamelCase , torch.Tensor ):
snake_case__ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: dict ) -> List[str]:
return map_nested(self._recursive_tensorize , UpperCamelCase , map_list=UpperCamelCase )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pa.Table ) -> Mapping:
snake_case__ = self.numpy_arrow_extractor().extract_row(UpperCamelCase )
snake_case__ = self.python_features_decoder.decode_row(UpperCamelCase )
return self.recursive_tensorize(UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pa.Table ) -> "torch.Tensor":
snake_case__ = self.numpy_arrow_extractor().extract_column(UpperCamelCase )
snake_case__ = self.python_features_decoder.decode_column(UpperCamelCase , pa_table.column_names[0] )
snake_case__ = self.recursive_tensorize(UpperCamelCase )
snake_case__ = self._consolidate(UpperCamelCase )
return column
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: pa.Table ) -> Mapping:
snake_case__ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase )
snake_case__ = self.python_features_decoder.decode_batch(UpperCamelCase )
snake_case__ = self.recursive_tensorize(UpperCamelCase )
for column_name in batch:
snake_case__ = self._consolidate(batch[column_name] )
return batch
| 307
| 0
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase__ ( a__ : Any ) -> Tuple:
return 1 / (1 + np.exp(-z ))
def lowerCamelCase__ ( a__ : int , a__ : List[str] ) -> Tuple:
return (-y * np.log(_A ) - (1 - y) * np.log(1 - h )).mean()
def lowerCamelCase__ ( a__ : Dict , a__ : List[Any] , a__ : Dict ) -> List[str]:
UpperCamelCase_ = np.dot(_A , _A )
return np.sum(y * scores - np.log(1 + np.exp(_A ) ) )
def lowerCamelCase__ ( a__ : Any , a__ : Tuple , a__ : Any , a__ : List[str]=7_0000 ) -> Tuple:
UpperCamelCase_ = np.zeros(x.shape[1] )
for iterations in range(_A ):
UpperCamelCase_ = np.dot(_A , _A )
UpperCamelCase_ = sigmoid_function(_A )
UpperCamelCase_ = np.dot(x.T , h - y ) / y.size
UpperCamelCase_ = theta - alpha * gradient # updating the weights
UpperCamelCase_ = np.dot(_A , _A )
UpperCamelCase_ = sigmoid_function(_A )
UpperCamelCase_ = cost_function(_A , _A )
if iterations % 100 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_A = datasets.load_iris()
_A = iris.data[:, :2]
_A = (iris.target != 0) * 1
_A = 0.1
_A = logistic_reg(alpha, x, y, max_iterations=70_000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def lowerCamelCase__ ( a__ : List[str] ) -> List[str]:
return sigmoid_function(
np.dot(_A , _A ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
(_A) = (x[:, 0].min(), x[:, 0].max())
(_A) = (x[:, 1].min(), x[:, 1].max())
(_A) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_A = np.c_[xxa.ravel(), xxa.ravel()]
_A = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 122
|
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE:
def __init__( self: Dict ) -> None:
snake_case__ = [2, 1, 2, -1]
snake_case__ = [1, 2, 3, 4]
def lowerCAmelCase_ ( self: List[str] ) -> list[float]:
snake_case__ = len(self.first_signal )
snake_case__ = len(self.second_signal )
snake_case__ = max(UpperCamelCase , UpperCamelCase )
# create a zero matrix of max_length x max_length
snake_case__ = [[0] * max_length for i in range(UpperCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCamelCase ):
snake_case__ = deque(self.second_signal )
rotated_signal.rotate(UpperCamelCase )
for j, item in enumerate(UpperCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
snake_case__ = np.matmul(np.transpose(UpperCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(UpperCamelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 307
| 0
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
A_ :str = """Usage of script: script_name <size_of_canvas:int>"""
A_ :Tuple = [0] * 100 + [1] * 10
random.shuffle(choice)
def A ( a_ ) -> list[list[bool]]:
__UpperCamelCase : Any =[[False for i in range(_A )] for j in range(_A )]
return canvas
def A ( a_ ) -> None:
for i, row in enumerate(_A ):
for j, _ in enumerate(_A ):
__UpperCamelCase : List[Any] =bool(random.getrandbits(1 ) )
def A ( a_ ) -> list[list[bool]]:
__UpperCamelCase : Tuple =np.array(_A )
__UpperCamelCase : Union[str, Any] =np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_A ):
for c, pt in enumerate(_A ):
__UpperCamelCase : Dict =__judge_point(
_A ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__UpperCamelCase : List[str] =next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__UpperCamelCase : Dict =current_canvas.tolist()
return return_canvas
def A ( a_ ,a_ ) -> bool:
__UpperCamelCase : str =0
__UpperCamelCase : Union[str, Any] =0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__UpperCamelCase : int =pt
if pt:
if alive < 2:
__UpperCamelCase : List[str] =False
elif alive == 2 or alive == 3:
__UpperCamelCase : Any =True
elif alive > 3:
__UpperCamelCase : Union[str, Any] =False
else:
if alive == 3:
__UpperCamelCase : Optional[Any] =True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
A_ :Tuple = int(sys.argv[1])
# main working structure of this module.
A_ :str = create_canvas(canvas_size)
seed(c)
A_ :List[Any] = plt.subplots()
fig.show()
A_ :int = ListedColormap(['''w''', '''k'''])
try:
while True:
A_ :Any = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 71
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
snake_case__ = []
for i in range(_A ):
snake_case__ = i / num_diffusion_timesteps
snake_case__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) )
return torch.tensor(_A , dtype=torch.floataa )
class __SCREAMING_SNAKE_CASE( a_ , a_ ):
_UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
_UpperCAmelCase = 2
@register_to_config
def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str:
if trained_betas is not None:
snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
snake_case__ = 1.0 - self.betas
snake_case__ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase )
snake_case__ = use_karras_sigmas
def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str:
if schedule_timesteps is None:
snake_case__ = self.timesteps
snake_case__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0
else:
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
snake_case__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
snake_case__ = self.index_for_timestep(UpperCamelCase )
snake_case__ = self.sigmas[step_index]
snake_case__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str:
snake_case__ = num_inference_steps
snake_case__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case__ = np.log(UpperCamelCase )
snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase )
if self.config.use_karras_sigmas:
snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps )
snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] )
snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase )
snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
snake_case__ = torch.from_numpy(UpperCamelCase )
snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(UpperCamelCase ).startswith('mps' ):
# mps does not support float64
snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa )
else:
snake_case__ = timesteps.to(device=UpperCamelCase )
# empty dt and derivative
snake_case__ = None
snake_case__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case__ = defaultdict(UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple:
# get log sigma
snake_case__ = np.log(UpperCamelCase )
# get distribution
snake_case__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
snake_case__ = low_idx + 1
snake_case__ = log_sigmas[low_idx]
snake_case__ = log_sigmas[high_idx]
# interpolate sigmas
snake_case__ = (low - log_sigma) / (low - high)
snake_case__ = np.clip(UpperCamelCase , 0 , 1 )
# transform interpolation to time range
snake_case__ = (1 - w) * low_idx + w * high_idx
snake_case__ = t.reshape(sigma.shape )
return t
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor:
snake_case__ = in_sigmas[-1].item()
snake_case__ = in_sigmas[0].item()
snake_case__ = 7.0 # 7.0 is the value used in the paper
snake_case__ = np.linspace(0 , 1 , UpperCamelCase )
snake_case__ = sigma_min ** (1 / rho)
snake_case__ = sigma_max ** (1 / rho)
snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
return self.dt is None
def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]:
snake_case__ = self.index_for_timestep(UpperCamelCase )
# advance index counter by 1
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case__ = self.sigmas[step_index]
snake_case__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
snake_case__ = self.sigmas[step_index - 1]
snake_case__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case__ = 0
snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
snake_case__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case__ = sigma_next - sigma_hat
# store for 2nd order step
snake_case__ = derivative
snake_case__ = dt
snake_case__ = sample
else:
# 2. 2nd order / Heun's method
snake_case__ = (sample - pred_original_sample) / sigma_next
snake_case__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
snake_case__ = self.dt
snake_case__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
snake_case__ = None
snake_case__ = None
snake_case__ = None
snake_case__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ):
# mps does not support float64
snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
snake_case__ = self.timesteps.to(original_samples.device )
snake_case__ = timesteps.to(original_samples.device )
snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps]
snake_case__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case__ = sigma.unsqueeze(-1 )
snake_case__ = original_samples + noise * sigma
return noisy_samples
def __len__( self: List[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 307
| 0
|
'''simple docstring'''
import os
import string
import sys
_A : List[Any] = 1 << 8
_A : Union[str, Any] = {
"""tab""": ord('''\t'''),
"""newline""": ord('''\r'''),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
_A : Optional[Any] = KEYMAP["""up"""]
_A : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
_A : List[Any] = []
_A : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
_A : List[str] = ord(str(i))
def UpperCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
__lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26 ) )
__lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
__lowerCAmelCase = cha[1]
else:
__lowerCAmelCase = ch.decode(_A )
else:
__lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__lowerCAmelCase = sys.stdin.fileno()
__lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
__lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def UpperCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
__lowerCAmelCase = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
__lowerCAmelCase = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 229
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : Tuple = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191
|
def a_ ( _A , _A ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def a_ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 307
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : str = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowercase__ ( a_ , unittest.TestCase ):
lowercase__ = GPTSwaTokenizer
lowercase__ = False
lowercase__ = True
lowercase__ = False
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : Union[str, Any] = GPTSwaTokenizer(lowerCamelCase__ ,eos_token='<unk>' ,bos_token='<unk>' ,pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = 'This is a test'
_UpperCamelCase : int = 'This is a test'
return input_text, output_text
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = '<s>'
_UpperCamelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<unk>' )
self.assertEqual(vocab_keys[1] ,'<s>' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(lowerCamelCase__ ) ,2000 )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,2000 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = GPTSwaTokenizer(lowerCamelCase__ )
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[465, 287, 265, 631, 842] )
_UpperCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
lowerCamelCase__ ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] ,)
# fmt: on
_UpperCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)
_UpperCamelCase : int = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
# fmt: off
self.assertListEqual(
lowerCamelCase__ ,['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : str = GPTSwaTokenizer(lowerCamelCase__ )
_UpperCamelCase : List[Any] = ['This is a test', 'I was born in 92000, and this is falsé.']
_UpperCamelCase : Any = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertListEqual(tokenizer.encode_fast(lowerCamelCase__ ) ,lowerCamelCase__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertEqual(tokenizer.decode_fast(lowerCamelCase__ ) ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Any = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_UpperCamelCase : List[Any] = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name='AI-Sweden/gpt-sw3-126m' ,sequences=lowerCamelCase__ ,)
| 83
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
__UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = cn.convert_to_negative(_A )
# assert negative_img array for at least one True
assert negative_img.any()
def a_ ( ) -> int:
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_A , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def a_ ( ) -> List[str]:
"""simple docstring"""
snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
snake_case__ = canny.canny(_A )
# assert canny array for at least one True
assert canny_array.any()
def a_ ( ) -> Optional[int]:
"""simple docstring"""
assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all()
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
# laplace diagonals
snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
snake_case__ = conv.img_convolve(_A , _A ).astype(_A )
assert res.any()
def a_ ( ) -> Dict:
"""simple docstring"""
assert med.median_filter(_A , 3 ).any()
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case__ , snake_case__ = sob.sobel_filter(_A )
assert grad.any() and theta.any()
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ = sp.make_sepia(_A , 20 )
assert sepia.all()
def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]:
"""simple docstring"""
snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def a_ ( ) -> Any:
"""simple docstring"""
snake_case__ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
snake_case__ = imread(_A , 0 )
# Test for get_neighbors_pixel function() return not None
snake_case__ = 0
snake_case__ = 0
snake_case__ = image[x_coordinate][y_coordinate]
snake_case__ = lbp.get_neighbors_pixel(
_A , _A , _A , _A )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
snake_case__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
snake_case__ = lbp.local_binary_value(_A , _A , _A )
assert lbp_image.any()
| 307
| 0
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 1
|
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
lowercase : str = len(__magic_name__ )
lowercase : Any = max(__magic_name__ )
lowercase : Optional[Any] = min(__magic_name__ )
# create the counting array
lowercase : List[str] = coll_max + 1 - coll_min
lowercase : Optional[int] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , __magic_name__ ):
lowercase : Union[str, Any] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
lowercase : List[Any] = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , __magic_name__ ) ):
lowercase : List[str] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
return "".join([chr(__magic_name__ ) for i in counting_sort([ord(__magic_name__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
lowerCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 308
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ )
lowercase : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = abs(__magic_name__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
return sum(int(__magic_name__ ) for c in str(abs(__magic_name__ ) ) )
def snake_case( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None:
lowercase : str = F"""{func.__name__}({value})"""
lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__magic_name__ , __magic_name__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCAmelCase_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _A :
def __init__( self : str , _A : Dict , _A : Optional[Any]=16 , _A : List[Any]=13 , _A : Dict=7 , _A : Dict=14 , _A : int=10 , _A : List[str]=19 , _A : Any=5 , _A : List[Any]=4 , _A : Optional[Any]=True , _A : List[str]=16 , _A : Any=2 , _A : Optional[int]=4 , _A : List[Any]=4 , _A : List[str]="gelu" , _A : Dict=0.1 , _A : Tuple=0.1 , _A : List[Any]=[1, 2, 3, 4, 5] , _A : List[Any]=25 , _A : List[Any]=5 , ) -> str:
"""simple docstring"""
lowercase : int = d_model
lowercase : Optional[Any] = parent
lowercase : int = batch_size
lowercase : List[str] = prediction_length
lowercase : List[Any] = context_length
lowercase : Union[str, Any] = cardinality
lowercase : str = num_time_features
lowercase : str = lags_sequence
lowercase : Union[str, Any] = embedding_dimension
lowercase : str = is_training
lowercase : Dict = hidden_size
lowercase : List[Any] = num_hidden_layers
lowercase : Union[str, Any] = num_attention_heads
lowercase : str = intermediate_size
lowercase : Optional[Any] = hidden_act
lowercase : List[Any] = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = context_length
lowercase : Union[str, Any] = prediction_length + label_length
lowercase : List[str] = label_length
lowercase : str = moving_average
lowercase : Optional[Any] = autocorrelation_factor
def __a ( self : Any ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __a ( self : Optional[int] , _A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[Any] = config.context_length + max(config.lags_sequence )
lowercase : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowercase : List[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowercase : Union[str, Any] = floats_tensor([self.batch_size, _past_length] )
lowercase : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowercase : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
lowercase : Any = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __a ( self : Any ) -> str:
"""simple docstring"""
lowercase : Optional[Any] = self.get_config()
lowercase : List[Any] = self.prepare_autoformer_inputs_dict(_A )
return config, inputs_dict
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase , lowercase : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self : List[str] , _A : List[str] , _A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[str] = AutoformerModel(config=_A ).to(_A ).eval()
lowercase : List[Any] = model(**_A )
lowercase : List[str] = outputs.encoder_last_hidden_state
lowercase : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(_A )
lowercase : int = AutoformerEncoder.from_pretrained(_A ).to(_A )
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = model.create_network_inputs(**_A )
lowercase , lowercase : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowercase : str = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowercase : Tuple = encoder(inputs_embeds=_A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowercase : Optional[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowercase : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowercase : Optional[int] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowercase : Tuple = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[str] = model.get_decoder()
decoder.save_pretrained(_A )
lowercase : Union[str, Any] = AutoformerDecoder.from_pretrained(_A ).to(_A )
lowercase : Dict = decoder(
trend=_A , inputs_embeds=_A , encoder_hidden_states=_A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCamelCase : Dict = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_UpperCamelCase : List[str] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = False
def __a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int = AutoformerModelTester(self )
lowercase : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A )
def __a ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Any ) -> str:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowercase : str = model_class(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
lowercase , lowercase : Dict = model_class.from_pretrained(_A , output_loading_info=_A )
self.assertEqual(info['''missing_keys'''] , [] )
def __a ( self : Tuple ) -> str:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __a ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : List[str] = inspect.signature(getattr(_A , '''forward''' ) )
# The main input is the name of the argument after `self`
lowercase : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _A )
def __a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[str] = model_class(_A )
lowercase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Any = [*signature.parameters.keys()]
lowercase : Optional[int] = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(_A )] , _A )
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple = True
lowercase : Union[str, Any] = getattr(self.model_tester , '''seq_length''' , _A )
lowercase : List[Any] = getattr(self.model_tester , '''decoder_seq_length''' , _A )
lowercase : Dict = getattr(self.model_tester , '''encoder_seq_length''' , _A )
lowercase : Optional[Any] = getattr(self.model_tester , '''d_model''' , _A )
lowercase : Tuple = getattr(self.model_tester , '''num_attention_heads''' , _A )
lowercase : List[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowercase : Any = True
lowercase : Optional[Any] = False
lowercase : Union[str, Any] = True
lowercase : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
lowercase : str = model(**self._prepare_for_class(_A , _A ) )
lowercase : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase : Dict = True
lowercase : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
lowercase : Tuple = model(**self._prepare_for_class(_A , _A ) )
lowercase : List[str] = outputs.encoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowercase : Dict = len(_A )
lowercase : Tuple = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_A , _A )
# decoder attentions
lowercase : str = outputs.decoder_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowercase : Union[str, Any] = outputs.cross_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowercase : Union[str, Any] = True
lowercase : List[Any] = True
lowercase : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
lowercase : List[str] = model(**self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + 2 , len(_A ) )
lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case( __magic_name__="train-batch.pt" ) -> Any:
'''simple docstring'''
lowercase : Dict = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__magic_name__ , repo_type='''dataset''' )
lowercase : int = torch.load(__magic_name__ , map_location=__magic_name__ )
return batch
@require_torch
@slow
class _A ( unittest.TestCase ):
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_A )
lowercase : str = prepare_batch()
with torch.no_grad():
lowercase : Optional[int] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
lowercase : str = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _A )
lowercase : str = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_A )
lowercase : Any = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
lowercase : Optional[Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
lowercase : Tuple = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _A )
lowercase : Any = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase : Any = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(_A )
lowercase : str = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
lowercase : List[Any] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
lowercase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _A )
lowercase : str = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=_A )
lowercase : List[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _A , rtol=1E-1 ) )
| 308
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 1
|
import torch
from transformers import AutoModel
class _A ( torch.nn.Module ):
def __init__( self : Optional[int] , _A : Union[str, Any]="sayef/fsner-bert-base-uncased" ) -> Any:
"""simple docstring"""
super(_A , self ).__init__()
lowercase : Optional[Any] = AutoModel.from_pretrained(_A , return_dict=_A )
lowercase : Dict = torch.nn.CosineSimilarity(3 , 1E-08 )
lowercase : Any = torch.nn.Softmax(dim=1 )
def __a ( self : List[Any] , **_A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.bert(**_A ).last_hidden_state
def __a ( self : Any , _A : List[str] ) -> Any:
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=_A )
def __a ( self : List[Any] , _A : Optional[int] , _A : Dict , _A : int=1 ) -> Any:
"""simple docstring"""
return self.softmax(T * self.cos(_A , _A ) )
def __a ( self : Optional[Any] , _A : Tuple , _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : List[Any] = W_supports['''sizes'''].tolist()
lowercase : str = W_supports['''start_token_id'''].item()
lowercase : Tuple = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase : List[str] = self.BERT(**_A )
lowercase : Optional[Any] = self.BERT(**_A )
lowercase : int = None
lowercase : Optional[int] = None
lowercase : Dict = W_supports['''input_ids'''] == start_token_id
lowercase : int = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(_A ):
if i == 0:
lowercase : Union[str, Any] = 0
else:
lowercase : Tuple = support_sizes[i - 1]
lowercase : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
lowercase : Dict = S[s : s + size][end_token_masks[s : s + size]]
lowercase : Union[str, Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase : Dict = torch.vstack((p_starts, p_start) )
lowercase : Optional[int] = torch.vstack((p_ends, p_end) )
else:
lowercase : int = p_start
lowercase : Optional[int] = p_end
return p_starts, p_ends
| 308
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _A ( _lowerCamelCase ):
_UpperCamelCase : jnp.ndarray
@flax_register_to_config
class _A ( nn.Module , _lowerCamelCase , _lowerCamelCase ):
_UpperCamelCase : int = 3_2
_UpperCamelCase : int = 4
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1_2_8_0
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : bool = False
def __a ( self : Tuple , _A : jax.random.KeyArray ) -> FrozenDict:
"""simple docstring"""
lowercase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase : List[Any] = jnp.zeros(_A , dtype=jnp.floataa )
lowercase : Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa )
lowercase : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase , lowercase : List[Any] = jax.random.split(_A )
lowercase : int = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_A , _A , _A , _A )["params"]
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : List[str] = self.block_out_channels
lowercase : Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase : str = self.num_attention_heads or self.attention_head_dim
# input
lowercase : Any = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase : Union[str, Any] = FlaxTimestepEmbedding(_A , dtype=self.dtype )
lowercase : Optional[int] = self.only_cross_attention
if isinstance(_A , _A ):
lowercase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
lowercase : Any = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase : Optional[int] = []
lowercase : Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowercase : str = output_channel
lowercase : Dict = block_out_channels[i]
lowercase : List[str] = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase : Dict = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowercase : List[str] = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
lowercase : List[Any] = down_blocks
# mid
lowercase : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowercase : Optional[int] = []
lowercase : List[Any] = list(reversed(_A ) )
lowercase : Optional[Any] = list(reversed(_A ) )
lowercase : Any = list(reversed(_A ) )
lowercase : Union[str, Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowercase : Tuple = output_channel
lowercase : List[str] = reversed_block_out_channels[i]
lowercase : int = reversed_block_out_channels[min(i + 1 , len(_A ) - 1 )]
lowercase : int = i == len(_A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowercase : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowercase : str = FlaxUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_A )
lowercase : List[str] = output_channel
lowercase : Union[str, Any] = up_blocks
# out
lowercase : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , _A : Optional[Any] , _A : List[str] , _A : str , _A : List[str]=None , _A : Optional[Any]=None , _A : bool = True , _A : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
if not isinstance(_A , jnp.ndarray ):
lowercase : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase : List[Any] = timesteps.astype(dtype=jnp.floataa )
lowercase : Any = jnp.expand_dims(_A , 0 )
lowercase : Union[str, Any] = self.time_proj(_A )
lowercase : Dict = self.time_embedding(_A )
# 2. pre-process
lowercase : List[str] = jnp.transpose(_A , (0, 2, 3, 1) )
lowercase : Any = self.conv_in(_A )
# 3. down
lowercase : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
lowercase , lowercase : Any = down_block(_A , _A , _A , deterministic=not train )
else:
lowercase , lowercase : Tuple = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowercase : List[str] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_A , _A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowercase : Optional[int] = new_down_block_res_samples
# 4. mid
lowercase : Tuple = self.mid_block(_A , _A , _A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowercase : Union[str, Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
lowercase : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_A , _A ):
lowercase : Tuple = up_block(
_A , temb=_A , encoder_hidden_states=_A , res_hidden_states_tuple=_A , deterministic=not train , )
else:
lowercase : Optional[int] = up_block(_A , temb=_A , res_hidden_states_tuple=_A , deterministic=not train )
# 6. post-process
lowercase : List[Any] = self.conv_norm_out(_A )
lowercase : int = nn.silu(_A )
lowercase : List[str] = self.conv_out(_A )
lowercase : Tuple = jnp.transpose(_A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_A )
| 308
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = tokenizer
lowercase : List[Any] = tokenizer.bos_token_id
lowercase : Union[str, Any] = dataset
lowercase : Union[str, Any] = seq_length
lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ) -> int:
"""simple docstring"""
lowercase : Dict = iter(self.dataset )
lowercase : Union[str, Any] = True
while more_examples:
lowercase , lowercase : Tuple = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase : List[str] = False
break
lowercase : str = tokenizer(_A , truncation=_A )['''input_ids''']
lowercase : List[str] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_A ) , self.seq_length ):
lowercase : int = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = {'''streaming''': True}
lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
model.eval()
lowercase : str = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
lowercase : Tuple = torch.exp(__magic_name__ )
except OverflowError:
lowercase : List[str] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
| 1
|
from math import pi, sqrt, tan
def snake_case( __magic_name__ ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def snake_case( __magic_name__ ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def snake_case( __magic_name__ ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
lowercase : Optional[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__magic_name__ , 2 ) * torus_radius * tube_radius
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def snake_case( __magic_name__ ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
lowercase : str = (sidea + sidea + sidea) / 2
lowercase : Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def snake_case( __magic_name__ ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 308
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = '''deta'''
_UpperCamelCase : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , _A : str=None , _A : List[Any]=900 , _A : int=2_048 , _A : int=6 , _A : int=2_048 , _A : List[Any]=8 , _A : str=6 , _A : Optional[int]=1_024 , _A : Tuple=8 , _A : Optional[Any]=0.0 , _A : Optional[int]=True , _A : str="relu" , _A : List[str]=256 , _A : List[str]=0.1 , _A : Dict=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=0.02 , _A : List[Any]=1.0 , _A : int=True , _A : str=False , _A : List[Any]="sine" , _A : int=5 , _A : List[Any]=4 , _A : List[str]=4 , _A : Any=True , _A : List[str]=300 , _A : Dict=True , _A : Tuple=True , _A : List[Any]=1 , _A : Optional[Any]=5 , _A : List[Any]=2 , _A : Tuple=1 , _A : List[Any]=1 , _A : Union[str, Any]=5 , _A : int=2 , _A : Optional[Any]=0.1 , _A : List[str]=0.25 , **_A : int , ) -> List[str]:
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_A , _A ):
lowercase : Dict = backbone_config.pop('''model_type''' )
lowercase : Dict = CONFIG_MAPPING[backbone_model_type]
lowercase : Any = config_class.from_dict(_A )
lowercase : int = backbone_config
lowercase : Dict = num_queries
lowercase : Dict = max_position_embeddings
lowercase : Union[str, Any] = d_model
lowercase : str = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Tuple = encoder_attention_heads
lowercase : List[str] = decoder_ffn_dim
lowercase : Union[str, Any] = decoder_layers
lowercase : List[str] = decoder_attention_heads
lowercase : List[Any] = dropout
lowercase : str = attention_dropout
lowercase : List[str] = activation_dropout
lowercase : List[str] = activation_function
lowercase : Optional[int] = init_std
lowercase : int = init_xavier_std
lowercase : int = encoder_layerdrop
lowercase : Dict = auxiliary_loss
lowercase : Any = position_embedding_type
# deformable attributes
lowercase : List[str] = num_feature_levels
lowercase : int = encoder_n_points
lowercase : Optional[Any] = decoder_n_points
lowercase : int = two_stage
lowercase : str = two_stage_num_proposals
lowercase : Optional[int] = with_box_refine
lowercase : Union[str, Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase : Any = class_cost
lowercase : Optional[int] = bbox_cost
lowercase : Tuple = giou_cost
# Loss coefficients
lowercase : int = mask_loss_coefficient
lowercase : Any = dice_loss_coefficient
lowercase : int = bbox_loss_coefficient
lowercase : Optional[Any] = giou_loss_coefficient
lowercase : int = eos_coefficient
lowercase : Any = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def __a ( self : int ) -> int:
"""simple docstring"""
return self.d_model
def __a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] = self.backbone_config.to_dict()
lowercase : str = self.__class__.model_type
return output
| 308
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 1
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 1
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowercase : Any = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__magic_name__ ) )
return round(__magic_name__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308
| 1
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
def snake_case( ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=__magic_name__ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=__magic_name__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=__magic_name__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=__magic_name__ , default='''data/dump''' , help='''The dump file prefix.''' )
lowercase : Tuple = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowercase : Dict = BertTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowercase : Any = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowercase : Optional[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowercase : int = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowercase : int = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowercase : List[str] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowercase : Dict = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F"""{len(__magic_name__ )} examples to process.""" )
lowercase : Optional[int] = []
lowercase : int = 0
lowercase : List[str] = 1_00_00
lowercase : List[str] = time.time()
for text in data:
lowercase : Optional[Any] = F"""{bos} {text.strip()} {sep}"""
lowercase : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
lowercase : Optional[int] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowercase : Optional[int] = time.time()
logger.info('''Finished binarization''' )
logger.info(F"""{len(__magic_name__ )} examples processed.""" )
lowercase : Dict = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowercase : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowercase : Any = [np.uintaa(__magic_name__ ) for d in rslt]
else:
lowercase : Any = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(__magic_name__ , '''wb''' ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 1
|
lowerCAmelCase_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCAmelCase_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> list[int]:
'''simple docstring'''
lowercase : Any = True
lowercase : Tuple = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
order.append(__magic_name__ )
return order
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> list[int]:
'''simple docstring'''
lowercase : Tuple = True
lowercase : Any = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__magic_name__ , __magic_name__ , __magic_name__ )
return component
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : List[Any] = len(__magic_name__ ) * [False]
lowercase : dict[int, list[int]] = {vert: [] for vert in range(len(__magic_name__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__magic_name__ )
lowercase : List[str] = []
for i, was_visited in enumerate(__magic_name__ ):
if not was_visited:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase : Optional[int] = []
lowercase : str = len(__magic_name__ ) * [False]
for i in range(len(__magic_name__ ) ):
lowercase : List[str] = order[len(__magic_name__ ) - i - 1]
if not visited[vert]:
lowercase : Tuple = find_components(__magic_name__ , __magic_name__ , __magic_name__ )
components_list.append(__magic_name__ )
return components_list
| 308
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[Any] = '''microsoft/speecht5_tts'''
_UpperCamelCase : Dict = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_UpperCamelCase : Any = '''text_reader'''
_UpperCamelCase : Dict = SpeechTaProcessor
_UpperCamelCase : Dict = SpeechTaForTextToSpeech
_UpperCamelCase : List[Any] = SpeechTaHifiGan
_UpperCamelCase : Optional[Any] = ['''text''']
_UpperCamelCase : str = ['''audio''']
def __a ( self : Dict ) -> List[Any]:
"""simple docstring"""
if self.post_processor is None:
lowercase : Union[str, Any] = '''microsoft/speecht5_hifigan'''
super().setup()
def __a ( self : Dict , _A : Optional[int] , _A : Optional[int]=None ) -> str:
"""simple docstring"""
lowercase : Optional[int] = self.pre_processor(text=_A , return_tensors='''pt''' , truncation=_A )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
lowercase : Any = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
lowercase : Optional[Any] = torch.tensor(embeddings_dataset[7_305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __a ( self : Union[str, Any] , _A : Union[str, Any] ) -> str:
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**_A )
def __a ( self : Dict , _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
return self.post_processor(_A ).cpu().detach()
| 308
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 308
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 1
|
import os
import numpy
import onnx
def snake_case( __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] = a.name
lowercase : Any = b.name
lowercase : str = ''''''
lowercase : int = ''''''
lowercase : Optional[Any] = a == b
lowercase : Optional[Any] = name_a
lowercase : List[Any] = name_b
return res
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__magic_name__ , __magic_name__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __magic_name__ , __magic_name__ )
_graph_replace_input_with(node_proto.attribute[1].g , __magic_name__ , __magic_name__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(__magic_name__ , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Dict = list(model.graph.initializer )
lowercase : Optional[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase : List[str] = inits[i].name
lowercase : List[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] = os.path.dirname(__magic_name__ )
lowercase : Union[str, Any] = os.path.basename(__magic_name__ )
lowercase : int = onnx.load(os.path.join(__magic_name__ , __magic_name__ ) )
lowercase : Tuple = list(model.graph.initializer )
lowercase : Tuple = set()
lowercase : Any = {}
lowercase : List[Any] = []
lowercase : str = 0
for i in range(len(__magic_name__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__magic_name__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__magic_name__ )
dup_set.add(__magic_name__ )
lowercase : Union[str, Any] = inits[j].data_type
lowercase : Optional[int] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , __magic_name__ )
total_reduced_size += mem_size
lowercase : str = inits[i].name
lowercase : Optional[int] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__magic_name__ )
else:
lowercase : int = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 10_24 / 10_24 / 10_24 , '''GB''' )
lowercase : str = sorted(__magic_name__ )
_remove_dup_initializers_from_model(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase : int = '''optimized_''' + model_file_name
lowercase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
onnx.save(__magic_name__ , __magic_name__ )
return new_model
| 308
|
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 308
| 1
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = BlenderbotSmallTokenizer
_UpperCamelCase : List[str] = False
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
super().setUp()
lowercase : Optional[int] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
lowercase : int = dict(zip(_A , range(len(_A ) ) ) )
lowercase : Optional[Any] = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
lowercase : str = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def __a ( self : Optional[int] , **_A : List[Any] ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_A )
def __a ( self : Union[str, Any] , _A : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : Any = '''adapt act apte'''
lowercase : Union[str, Any] = '''adapt act apte'''
return input_text, output_text
def __a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase : Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : Optional[int] = '''adapt act apte'''
lowercase : List[Any] = ['''adapt''', '''act''', '''ap@@''', '''te''']
lowercase : Dict = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowercase : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
lowercase : List[str] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_384]
lowercase : Union[str, Any] = '''I am a small frog.'''
lowercase : Dict = tok([src_text] , padding=_A , truncation=_A )['''input_ids''']
lowercase : Dict = tok.batch_decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __a ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
lowercase : List[str] = '''I am a small frog .'''
lowercase : Any = '''.'''
lowercase : List[str] = tok(_A )['''input_ids''']
lowercase : Optional[int] = tok(_A )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 308
| 1
|
from __future__ import annotations
lowerCAmelCase_ = 8.988E9 # units = N * m^s * C^-2
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> dict[str, float]:
'''simple docstring'''
lowercase : str = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowercase : str = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase : Union[str, Any] = abs(__magic_name__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase : Dict = abs(__magic_name__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase : Dict = (COULOMBS_CONSTANT * charge_product / abs(__magic_name__ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308
| 1
|
from PIL import Image
def snake_case( __magic_name__ , __magic_name__ ) -> Image:
'''simple docstring'''
def brightness(__magic_name__ ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__magic_name__ )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
lowerCAmelCase_ = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 308
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _A ( unittest.TestCase , _lowerCamelCase ):
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = load_tool('''text-to-speech''' )
self.tool.setup()
def __a ( self : Any ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Optional[int] = self.tool('''hey''' )
lowercase : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def __a ( self : int ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Dict = self.tool('''hey''' )
lowercase : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 308
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCAmelCase_ = False
@skip_mps
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionAttendAndExcitePipeline
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
_UpperCamelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __a ( cls : Dict ) -> str:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_A )
@classmethod
def __a ( cls : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_A )
def __a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
lowercase : Optional[int] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
lowercase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase : Union[str, Any] = CLIPTextModel(_A )
lowercase : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self : Optional[Any] , _A : str , _A : Union[str, Any]=0 ) -> int:
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
lowercase : Dict = torch.manual_seed(_A )
else:
lowercase : Tuple = torch.Generator(device=_A ).manual_seed(_A )
lowercase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = '''cpu'''
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase : Tuple = self.get_dummy_inputs(_A )
lowercase : Any = pipe(**_A ).images
lowercase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowercase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
def __a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : int ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __a ( self : int ) -> str:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def __a ( self : Any ) -> Any:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _A ( unittest.TestCase ):
@classmethod
def __a ( cls : Any ) -> Optional[Any]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_A )
@classmethod
def __a ( cls : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_A )
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = torch.manual_seed(51 )
lowercase : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=_A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowercase : List[Any] = '''a painting of an elephant with glasses'''
lowercase : List[str] = [5, 7]
lowercase : List[str] = pipe(
prompt=_A , token_indices=_A , guidance_scale=7.5 , generator=_A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
lowercase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 1
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
assert column_title.isupper()
lowercase : Optional[int] = 0
lowercase : Optional[int] = len(__magic_name__ ) - 1
lowercase : Optional[int] = 0
while index >= 0:
lowercase : Optional[int] = (ord(column_title[index] ) - 64) * pow(26 , __magic_name__ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = '''xglm'''
_UpperCamelCase : int = ['''past_key_values''']
_UpperCamelCase : Tuple = {
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int , _A : List[str]=256_008 , _A : int=2_048 , _A : Dict=1_024 , _A : Dict=4_096 , _A : Optional[Any]=24 , _A : Union[str, Any]=16 , _A : Union[str, Any]="gelu" , _A : List[str]=0.1 , _A : Any=0.1 , _A : List[str]=0.0 , _A : Union[str, Any]=0.0 , _A : List[Any]=0.02 , _A : int=True , _A : Any=True , _A : Dict=2 , _A : Any=1 , _A : Dict=0 , _A : str=2 , **_A : List[Any] , ) -> str:
"""simple docstring"""
lowercase : List[Any] = vocab_size
lowercase : List[str] = max_position_embeddings
lowercase : Dict = d_model
lowercase : Optional[int] = ffn_dim
lowercase : Union[str, Any] = num_layers
lowercase : int = attention_heads
lowercase : int = activation_function
lowercase : Dict = dropout
lowercase : Optional[int] = attention_dropout
lowercase : Union[str, Any] = activation_dropout
lowercase : Optional[Any] = layerdrop
lowercase : Dict = init_std
lowercase : int = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase : Dict = use_cache
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
lowerCAmelCase_ = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case( __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[str] = torch.load(__magic_name__ , map_location='''cpu''' )
return sd
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=rename_keys_prefix ) -> int:
'''simple docstring'''
lowercase : List[str] = OrderedDict()
lowercase : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase : Dict = key
for name_pair in rename_keys_prefix:
lowercase : Tuple = new_key.replace(name_pair[0] , name_pair[1] )
lowercase : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowercase : Tuple = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
lowercase : Union[str, Any] = '''pretraining'''
if "vcr" in checkpoint_path:
lowercase : Any = {'''visual_embedding_dim''': 5_12}
elif "vqa_advanced" in checkpoint_path:
lowercase : List[str] = {'''visual_embedding_dim''': 20_48}
elif "vqa" in checkpoint_path:
lowercase : int = {'''visual_embedding_dim''': 20_48}
elif "nlvr" in checkpoint_path:
lowercase : Union[str, Any] = {'''visual_embedding_dim''': 10_24}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
lowercase : Tuple = {'''visual_embedding_dim''': 5_12}
lowercase : str = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
lowercase : int = {'''visual_embedding_dim''': 20_48}
lowercase : List[Any] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
lowercase : str = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29}
lowercase : Any = '''vqa'''
elif "nlvr" in checkpoint_path:
lowercase : Optional[Any] = {
'''visual_embedding_dim''': 10_24,
'''num_labels''': 2,
}
lowercase : List[str] = '''nlvr'''
lowercase : int = VisualBertConfig(**__magic_name__ )
# Load State Dict
lowercase : List[Any] = load_state_dict(__magic_name__ )
lowercase : Any = get_new_dict(__magic_name__ , __magic_name__ )
if model_type == "pretraining":
lowercase : Any = VisualBertForPreTraining(__magic_name__ )
elif model_type == "vqa":
lowercase : Any = VisualBertForQuestionAnswering(__magic_name__ )
elif model_type == "nlvr":
lowercase : int = VisualBertForVisualReasoning(__magic_name__ )
elif model_type == "multichoice":
lowercase : int = VisualBertForMultipleChoice(__magic_name__ )
model.load_state_dict(__magic_name__ )
# Save Checkpoints
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
lowerCAmelCase_ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 308
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ )
lowercase : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = abs(__magic_name__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
return sum(int(__magic_name__ ) for c in str(abs(__magic_name__ ) ) )
def snake_case( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None:
lowercase : str = F"""{func.__name__}({value})"""
lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__magic_name__ , __magic_name__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
| 1
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[int] = ['''input_values''', '''padding_mask''']
def __init__( self : Dict , _A : int = 1 , _A : int = 24_000 , _A : float = 0.0 , _A : float = None , _A : float = None , **_A : Dict , ) -> str:
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
lowercase : Dict = chunk_length_s
lowercase : str = overlap
@property
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Optional[int] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Optional[Union[bool, str, PaddingStrategy]] = None , _A : Optional[bool] = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
lowercase : Union[str, Any] = True
lowercase : List[str] = bool(
isinstance(_A , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
lowercase : int = [np.asarray(_A , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : Tuple = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray(_A ).T]
# verify inputs are valid
for idx, example in enumerate(_A ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
lowercase : Optional[int] = None
lowercase : List[Any] = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase : int = min(array.shape[0] for array in raw_audio )
lowercase : Any = int(np.floor(max_length / self.chunk_stride ) )
lowercase : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase : Optional[Any] = max(array.shape[0] for array in raw_audio )
lowercase : int = int(np.ceil(max_length / self.chunk_stride ) )
lowercase : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase : List[str] = '''max_length'''
else:
lowercase : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
lowercase : int = self.pad(
_A , max_length=_A , truncation=_A , padding=_A , return_attention_mask=_A , )
if padding:
lowercase : Any = padded_inputs.pop('''attention_mask''' )
lowercase : Any = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
lowercase : Optional[Any] = example[..., None]
input_values.append(example.T )
lowercase : List[str] = input_values
if return_tensors is not None:
lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(_A )
return padded_inputs
| 308
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308
| 1
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / 'model_card_template.md'
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def snake_case( __magic_name__ = None ) -> str:
'''simple docstring'''
lowercase : Dict = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__magic_name__ , __magic_name__ ):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__magic_name__ , __magic_name__ ):
ua += "; " + user_agent
return ua
def snake_case( __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> str:
'''simple docstring'''
if token is None:
lowercase : Any = HfFolder.get_token()
if organization is None:
lowercase : List[Any] = whoami(__magic_name__ )['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__magic_name__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
lowercase : Any = args.hub_token if hasattr(__magic_name__ , '''hub_token''' ) else None
lowercase : List[str] = get_full_repo_name(__magic_name__ , token=__magic_name__ )
lowercase : int = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__magic_name__ , model_name=__magic_name__ , repo_name=__magic_name__ , dataset_name=args.dataset_name if hasattr(__magic_name__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__magic_name__ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__magic_name__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(__magic_name__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__magic_name__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__magic_name__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__magic_name__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__magic_name__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__magic_name__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(__magic_name__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__magic_name__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
lowercase : Union[str, Any] = os.path.join(args.output_dir , '''README.md''' )
model_card.save(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ = None ) -> Optional[int]:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
lowercase : str = str(Path(__magic_name__ ).as_posix() )
lowercase : Union[str, Any] = re.search(r'''snapshots/([^/]+)/''' , __magic_name__ )
if search is None:
return None
lowercase : List[str] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__magic_name__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, 'diffusers')
def snake_case( __magic_name__ = None , __magic_name__ = None ) -> None:
'''simple docstring'''
if new_cache_dir is None:
lowercase : str = DIFFUSERS_CACHE
if old_cache_dir is None:
lowercase : Union[str, Any] = old_diffusers_cache
lowercase : Optional[Any] = Path(__magic_name__ ).expanduser()
lowercase : int = Path(__magic_name__ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowercase : List[Any] = new_cache_dir / old_blob_path.relative_to(__magic_name__ )
new_blob_path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
os.replace(__magic_name__ , __magic_name__ )
try:
os.symlink(__magic_name__ , __magic_name__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def snake_case( __magic_name__ , __magic_name__ = None ) -> str:
'''simple docstring'''
if variant is not None:
lowercase : Union[str, Any] = weights_name.split('''.''' )
lowercase : List[Any] = splits[:-1] + [variant] + splits[-1:]
lowercase : Tuple = '''.'''.join(__magic_name__ )
return weights_name
def snake_case( __magic_name__ , *,
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , ) -> str:
'''simple docstring'''
lowercase : Optional[Any] = str(__magic_name__ )
if os.path.isfile(__magic_name__ ):
return pretrained_model_name_or_path
elif os.path.isdir(__magic_name__ ):
if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) ):
# Load from a PyTorch checkpoint
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__magic_name__ , __magic_name__ , __magic_name__ ) ):
lowercase : Any = os.path.join(__magic_name__ , __magic_name__ , __magic_name__ )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__magic_name__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
lowercase : str = hf_hub_download(
__magic_name__ , filename=_add_variant(__magic_name__ , __magic_name__ ) , cache_dir=__magic_name__ , force_download=__magic_name__ , proxies=__magic_name__ , resume_download=__magic_name__ , local_files_only=__magic_name__ , use_auth_token=__magic_name__ , user_agent=__magic_name__ , subfolder=__magic_name__ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __magic_name__ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__magic_name__ , __magic_name__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__magic_name__ , __magic_name__ )}' so that the correct variant file can be added.""" , __magic_name__ , )
try:
# 2. Load model file as usual
lowercase : int = hf_hub_download(
__magic_name__ , filename=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , proxies=__magic_name__ , resume_download=__magic_name__ , local_files_only=__magic_name__ , use_auth_token=__magic_name__ , user_agent=__magic_name__ , subfolder=__magic_name__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 308
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = ['''image_processor''', '''tokenizer''']
_UpperCamelCase : str = '''CLIPImageProcessor'''
_UpperCamelCase : List[str] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[Any] , _A : str=None , _A : Tuple=None , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _A , )
lowercase : List[Any] = kwargs.pop('''feature_extractor''' )
lowercase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_A , _A )
def __call__( self : Any , _A : str=None , _A : str=None , _A : str=None , **_A : Optional[int] ) -> Dict:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase : Optional[int] = self.tokenizer(_A , return_tensors=_A , **_A )
if images is not None:
lowercase : int = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None and images is not None:
lowercase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def __a ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A )
def __a ( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A )
@property
def __a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.tokenizer.model_input_names
lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __a ( self : Dict ) -> str:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , )
return self.image_processor_class
@property
def __a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , )
return self.image_processor
| 308
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _A :
pass
| 308
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = tokenizer
lowercase : List[Any] = tokenizer.bos_token_id
lowercase : Union[str, Any] = dataset
lowercase : Union[str, Any] = seq_length
lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ) -> int:
"""simple docstring"""
lowercase : Dict = iter(self.dataset )
lowercase : Union[str, Any] = True
while more_examples:
lowercase , lowercase : Tuple = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase : List[str] = False
break
lowercase : str = tokenizer(_A , truncation=_A )['''input_ids''']
lowercase : List[str] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_A ) , self.seq_length ):
lowercase : int = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = {'''streaming''': True}
lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
model.eval()
lowercase : str = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
lowercase : Tuple = torch.exp(__magic_name__ )
except OverflowError:
lowercase : List[str] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
| 1
|
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase : List[Any] = str(bin(__magic_name__ ) )[2:] # remove the leading "0b"
lowercase : Union[str, Any] = str(bin(__magic_name__ ) )[2:]
lowercase : Optional[int] = max(len(__magic_name__ ) , len(__magic_name__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__magic_name__ ) , b_binary.zfill(__magic_name__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 1
|
import string
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
lowercase : Optional[int] = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
lowercase : Tuple = string.ascii_uppercase.find(__magic_name__ )
lowercase : Tuple = num - key
if num < 0:
lowercase : str = num + len(string.ascii_uppercase )
lowercase : str = translated + string.ascii_uppercase[num]
else:
lowercase : Any = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def snake_case( ) -> None:
'''simple docstring'''
lowercase : Dict = input('''Encrypted message: ''' )
lowercase : Tuple = message.upper()
decrypt(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 308
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ = {
'allenai/led-base-16384': 1_63_84,
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = VOCAB_FILES_NAMES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : int = LEDTokenizer
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , _A : Dict=None , _A : Dict=None , _A : Dict=None , _A : str="replace" , _A : Dict="<s>" , _A : Union[str, Any]="</s>" , _A : List[str]="</s>" , _A : int="<s>" , _A : str="<unk>" , _A : Any="<pad>" , _A : Union[str, Any]="<mask>" , _A : Any=False , _A : Dict=True , **_A : List[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
lowercase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
lowercase : Union[str, Any] = getattr(_A , pre_tok_state.pop('''type''' ) )
lowercase : Dict = add_prefix_space
lowercase : Tuple = pre_tok_class(**_A )
lowercase : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : Optional[Any] = '''post_processor'''
lowercase : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
lowercase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Dict = tuple(state['''cls'''] )
lowercase : Union[str, Any] = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
lowercase : str = add_prefix_space
lowercase : str = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
lowercase : str = trim_offsets
lowercase : List[Any] = True
if changes_to_apply:
lowercase : Optional[int] = getattr(_A , state.pop('''type''' ) )
lowercase : Dict = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self : List[str] ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self : Dict , _A : Tuple ) -> Any:
"""simple docstring"""
lowercase : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
lowercase : Union[str, Any] = value
def __a ( self : int , *_A : List[str] , **_A : str ) -> BatchEncoding:
"""simple docstring"""
lowercase : Union[str, Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def __a ( self : Tuple , *_A : Any , **_A : Any ) -> BatchEncoding:
"""simple docstring"""
lowercase : Optional[int] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def __a ( self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase : Optional[int] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def __a ( self : Optional[Any] , _A : Dict , _A : List[Any]=None ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : str = [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : int , _A : Union[Dict[str, EncodedInput], BatchEncoding] , _A : Optional[int] = None , _A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _A : Optional[int] = None , _A : Optional[bool] = None , ) -> dict:
"""simple docstring"""
lowercase : Optional[Any] = super()._pad(
encoded_inputs=_A , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
# Load from model defaults
if return_attention_mask is None:
lowercase : List[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : List[str] = len(encoded_inputs['''global_attention_mask'''] ) != len(_A )
if needs_to_be_padded:
lowercase : Any = len(_A ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : Optional[int] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : List[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 308
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 1
|
from __future__ import annotations
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
lowercase , lowercase : Union[str, Any] = array[indexa], array[indexa]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if length > 1:
lowercase : Union[str, Any] = int(length / 2 )
for i in range(__magic_name__ , low + middle ):
comp_and_swap(__magic_name__ , __magic_name__ , i + middle , __magic_name__ )
bitonic_merge(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
bitonic_merge(__magic_name__ , low + middle , __magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if length > 1:
lowercase : Optional[int] = int(length / 2 )
bitonic_sort(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
bitonic_sort(__magic_name__ , low + middle , __magic_name__ , 0 )
bitonic_merge(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 308
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308
| 1
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : int = AudioLDMPipeline
_UpperCamelCase : Tuple = TEXT_TO_AUDIO_PARAMS
_UpperCamelCase : int = TEXT_TO_AUDIO_BATCH_PARAMS
_UpperCamelCase : int = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , )
lowercase : Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
lowercase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
lowercase : Optional[Any] = ClapTextModelWithProjection(_A )
lowercase : Any = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
lowercase : Dict = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , )
lowercase : Any = SpeechTaHifiGan(_A )
lowercase : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def __a ( self : Union[str, Any] , _A : str , _A : Any=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
lowercase : Union[str, Any] = torch.manual_seed(_A )
else:
lowercase : Dict = torch.Generator(device=_A ).manual_seed(_A )
lowercase : int = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : int = AudioLDMPipeline(**_A )
lowercase : int = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Dict = self.get_dummy_inputs(_A )
lowercase : Dict = audioldm_pipe(**_A )
lowercase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
lowercase : int = audio[:10]
lowercase : Any = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __a ( self : List[str] ) -> str:
"""simple docstring"""
lowercase : Optional[int] = self.get_dummy_components()
lowercase : Union[str, Any] = AudioLDMPipeline(**_A )
lowercase : Any = audioldm_pipe.to(_A )
lowercase : Union[str, Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[Any] = self.get_dummy_inputs(_A )
lowercase : Tuple = 3 * [inputs['''prompt''']]
# forward
lowercase : Any = audioldm_pipe(**_A )
lowercase : Optional[Any] = output.audios[0]
lowercase : Any = self.get_dummy_inputs(_A )
lowercase : Any = 3 * [inputs.pop('''prompt''' )]
lowercase : Optional[Any] = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
lowercase : Any = text_inputs['''input_ids'''].to(_A )
lowercase : Union[str, Any] = audioldm_pipe.text_encoder(
_A , )
lowercase : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase : str = F.normalize(_A , dim=-1 )
lowercase : List[str] = prompt_embeds
# forward
lowercase : Dict = audioldm_pipe(**_A )
lowercase : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase : Tuple = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : Tuple = audioldm_pipe.to(_A )
lowercase : int = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = self.get_dummy_inputs(_A )
lowercase : str = 3 * ['''this is a negative prompt''']
lowercase : Any = negative_prompt
lowercase : Any = 3 * [inputs['''prompt''']]
# forward
lowercase : List[str] = audioldm_pipe(**_A )
lowercase : List[str] = output.audios[0]
lowercase : Optional[Any] = self.get_dummy_inputs(_A )
lowercase : Union[str, Any] = 3 * [inputs.pop('''prompt''' )]
lowercase : Dict = []
for p in [prompt, negative_prompt]:
lowercase : List[str] = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
lowercase : Union[str, Any] = text_inputs['''input_ids'''].to(_A )
lowercase : int = audioldm_pipe.text_encoder(
_A , )
lowercase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase : int = F.normalize(_A , dim=-1 )
embeds.append(_A )
lowercase , lowercase : Any = embeds
# forward
lowercase : List[Any] = audioldm_pipe(**_A )
lowercase : Union[str, Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_A )
lowercase : Dict = AudioLDMPipeline(**_A )
lowercase : Union[str, Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : str = self.get_dummy_inputs(_A )
lowercase : Optional[Any] = '''egg cracking'''
lowercase : Union[str, Any] = audioldm_pipe(**_A , negative_prompt=_A )
lowercase : int = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
lowercase : List[Any] = audio[:10]
lowercase : Dict = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : int = PNDMScheduler(skip_prk_steps=_A )
lowercase : int = AudioLDMPipeline(**_A )
lowercase : Any = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowercase : Optional[Any] = audioldm_pipe(_A , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowercase : Any = 2
lowercase : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowercase : Dict = 2
lowercase : Union[str, Any] = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowercase : Optional[Any] = 2
lowercase : List[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Dict = audioldm_pipe.vocoder.config.sampling_rate
lowercase : Optional[int] = self.get_dummy_inputs(_A )
lowercase : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 , **_A )
lowercase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.016
lowercase : Dict = audioldm_pipe(audio_length_in_s=0.032 , **_A )
lowercase : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.032
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
lowercase : str = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : Tuple = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = ['''hey''']
lowercase : Dict = audioldm_pipe(_A , num_inference_steps=1 )
lowercase : Optional[Any] = output.audios.shape
assert audio_shape == (1, 256)
lowercase : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowercase : Optional[int] = SpeechTaHifiGan(_A ).to(_A )
lowercase : Dict = audioldm_pipe(_A , num_inference_steps=1 )
lowercase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A )
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A )
@slow
class _A ( unittest.TestCase ):
def __a ( self : str ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[int] , _A : List[Any] , _A : str="cpu" , _A : Dict=torch.floataa , _A : str=0 ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
lowercase : List[str] = np.random.RandomState(_A ).standard_normal((1, 8, 128, 16) )
lowercase : List[Any] = torch.from_numpy(_A ).to(device=_A , dtype=_A )
lowercase : Optional[Any] = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase : Dict = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[int] = self.get_inputs(_A )
lowercase : Any = 25
lowercase : str = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 81_920
lowercase : Dict = audio[77_230:77_240]
lowercase : Any = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
lowercase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[int] = self.get_inputs(_A )
lowercase : Tuple = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 81_920
lowercase : Dict = audio[27_780:27_790]
lowercase : int = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
lowercase : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
| 1
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
with open(__magic_name__ ) as metadata_file:
lowercase : Dict = json.load(__magic_name__ )
lowercase : str = LukeConfig(use_entity_aware_attention=__magic_name__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
lowercase : str = torch.load(__magic_name__ , map_location='''cpu''' )
# Load the entity vocab file
lowercase : Union[str, Any] = load_entity_vocab(__magic_name__ )
lowercase : Tuple = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase : int = AddedToken('''<ent>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
lowercase : Dict = AddedToken('''<ent2>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowercase : Tuple = LukeTokenizer.from_pretrained(__magic_name__ )
# Initialize the embeddings of the special tokens
lowercase : Tuple = state_dict['''embeddings.word_embeddings.weight''']
lowercase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
lowercase : str = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
lowercase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase : Any = F"""encoder.layer.{layer_index}.attention.self."""
lowercase : Any = state_dict[prefix + matrix_name]
lowercase : Any = state_dict[prefix + matrix_name]
lowercase : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
lowercase : List[Any] = entity_emb[entity_vocab['''[MASK]''']]
lowercase : Any = LukeModel(config=__magic_name__ ).eval()
lowercase , lowercase : Optional[Any] = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
if not (len(__magic_name__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {', '.join(__magic_name__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
lowercase : Any = LukeTokenizer.from_pretrained(__magic_name__ , task='''entity_classification''' )
lowercase : List[Any] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
lowercase : List[Any] = (39, 42)
lowercase : List[Any] = tokenizer(__magic_name__ , entity_spans=[span] , add_prefix_space=__magic_name__ , return_tensors='''pt''' )
lowercase : int = model(**__magic_name__ )
# Verify word hidden states
if model_size == "large":
lowercase : Optional[int] = torch.Size((1, 42, 10_24) )
lowercase : List[Any] = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
lowercase : List[str] = torch.Size((1, 42, 7_68) )
lowercase : List[str] = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase : List[Any] = torch.Size((1, 1, 10_24) )
lowercase : Optional[int] = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
lowercase : Optional[Any] = torch.Size((1, 1, 7_68) )
lowercase : Any = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __magic_name__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__magic_name__ ) )
model.save_pretrained(__magic_name__ )
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
lowercase : int = {}
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
lowercase , lowercase : Optional[Any] = line.rstrip().split('''\t''' )
lowercase : Optional[Any] = index
return entity_vocab
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
lowerCAmelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 308
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : Dict[str, int] , _A : List[str] , _A : int = None , _A : int = None ) -> Tuple:
"""simple docstring"""
super().__init__()
lowercase : Tuple = pad_token_id
lowercase : Any = max_length
lowercase : int = vocab
lowercase : Optional[int] = merges
lowercase : List[Any] = BytePairTokenizer(_A , _A , sequence_length=_A )
@classmethod
def __a ( cls : List[Any] , _A : GPTaTokenizer , *_A : Optional[int] , **_A : Dict ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] = [''' '''.join(_A ) for m in tokenizer.bpe_ranks.keys()]
lowercase : Dict = tokenizer.get_vocab()
return cls(_A , _A , *_A , **_A )
@classmethod
def __a ( cls : Any , _A : Union[str, os.PathLike] , *_A : List[str] , **_A : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[Any] = GPTaTokenizer.from_pretrained(_A , *_A , **_A )
return cls.from_tokenizer(_A , *_A , **_A )
@classmethod
def __a ( cls : int , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return cls(**_A )
def __a ( self : str ) -> Optional[Any]:
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __a ( self : Any , _A : Optional[int] , _A : int = None ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = self.tf_tokenizer(_A )
lowercase : Any = tf.ones_like(_A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowercase : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
lowercase , lowercase : List[str] = pad_model_inputs(
_A , max_seq_length=_A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 308
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
lowerCAmelCase_ = namedtuple('covid_data', 'cases deaths recovered')
def snake_case( __magic_name__ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
lowercase : str = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(__magic_name__ ).content ).xpath(__magic_name__ ) )
lowerCAmelCase_ = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 308
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
|
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 308
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[int] = '''big_bird'''
def __init__( self : Any , _A : Optional[int]=50_358 , _A : Union[str, Any]=768 , _A : List[str]=12 , _A : Optional[int]=12 , _A : int=3_072 , _A : Optional[int]="gelu_new" , _A : str=0.1 , _A : List[str]=0.1 , _A : int=4_096 , _A : List[Any]=2 , _A : Union[str, Any]=0.02 , _A : Union[str, Any]=1E-12 , _A : Dict=True , _A : Optional[int]=0 , _A : int=1 , _A : List[Any]=2 , _A : Dict=66 , _A : int="block_sparse" , _A : Union[str, Any]=True , _A : Any=False , _A : Union[str, Any]=64 , _A : Optional[Any]=3 , _A : Tuple=None , **_A : int , ) -> Any:
"""simple docstring"""
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , )
lowercase : str = vocab_size
lowercase : Union[str, Any] = max_position_embeddings
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : int = intermediate_size
lowercase : str = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : str = attention_probs_dropout_prob
lowercase : Tuple = initializer_range
lowercase : Optional[int] = type_vocab_size
lowercase : Tuple = layer_norm_eps
lowercase : Union[str, Any] = use_cache
lowercase : Optional[Any] = rescale_embeddings
lowercase : str = attention_type
lowercase : Dict = use_bias
lowercase : Optional[int] = block_size
lowercase : Any = num_random_blocks
lowercase : int = classifier_dropout
class _A ( _lowerCamelCase ):
@property
def __a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 308
| 1
|
from __future__ import annotations
lowerCAmelCase_ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class _A :
def __init__( self : List[Any] , _A : dict[str, list[str]] , _A : str ) -> None:
"""simple docstring"""
lowercase : List[str] = graph
# mapping node to its parent in resulting breadth first tree
lowercase : dict[str, str | None] = {}
lowercase : Any = source_vertex
def __a ( self : int ) -> None:
"""simple docstring"""
lowercase : List[str] = {self.source_vertex}
lowercase : Optional[Any] = None
lowercase : Optional[int] = [self.source_vertex] # first in first out queue
while queue:
lowercase : Tuple = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_A )
lowercase : Union[str, Any] = vertex
queue.append(_A )
def __a ( self : str , _A : str ) -> str:
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
lowercase : int = self.parent.get(_A )
if target_vertex_parent is None:
lowercase : Dict = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(_A )
return self.shortest_path(_A ) + f"""->{target_vertex}"""
if __name__ == "__main__":
lowerCAmelCase_ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308
| 1
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = '''huggingface/label-files'''
lowercase : Union[str, Any] = '''imagenet-1k-id2label.json'''
lowercase : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Optional[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Tuple = {v: k for k, v in idalabel.items()}
lowercase : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase : Union[str, Any] = BitConfig(
conv_layer=__magic_name__ , num_labels=10_00 , idalabel=__magic_name__ , labelaid=__magic_name__ , )
return config
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
lowercase : Dict = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowercase : List[Any] = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
lowercase : Any = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
lowercase : List[str] = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
lowercase : Optional[int] = '''bit.encoder.''' + name
return name
def snake_case( ) -> Any:
'''simple docstring'''
lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = get_config(__magic_name__ )
# load original model from timm
lowercase : Optional[int] = create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model
lowercase : Union[str, Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase : Optional[int] = state_dict.pop(__magic_name__ )
lowercase : Optional[Any] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
lowercase : str = BitForImageClassification(__magic_name__ )
model.eval()
model.load_state_dict(__magic_name__ )
# create image processor
lowercase : Dict = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
lowercase : Tuple = transform.transforms
lowercase : str = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowercase : Union[str, Any] = BitImageProcessor(
do_resize=__magic_name__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase : Optional[int] = prepare_img()
lowercase : Any = transform(__magic_name__ ).unsqueeze(0 )
lowercase : Optional[int] = processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
lowercase : int = model(__magic_name__ )
lowercase : int = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase : Optional[int] = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
def snake_case( __magic_name__ = 10**9 ) -> int:
'''simple docstring'''
lowercase : Any = 1
lowercase : int = 2
lowercase : Any = 0
lowercase : str = 0
lowercase : Tuple = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowercase : List[str] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 308
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _A : int , _A : Any=7 , _A : List[Any]=3 , _A : Union[str, Any]=30 , _A : str=400 , _A : Optional[int]=True , _A : Dict=None , _A : Tuple=True , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=[0.5, 0.5, 0.5] , _A : Optional[Any]=True , _A : List[Any]=1 / 255 , _A : Dict=True , ) -> int:
"""simple docstring"""
lowercase : Tuple = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
lowercase : List[Any] = parent
lowercase : Tuple = batch_size
lowercase : int = num_channels
lowercase : int = min_resolution
lowercase : Optional[Any] = max_resolution
lowercase : int = do_resize
lowercase : Any = size
lowercase : str = do_normalize
lowercase : Dict = image_mean
lowercase : Any = image_std
lowercase : List[Any] = do_rescale
lowercase : Any = rescale_factor
lowercase : Any = do_pad
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __a ( self : int , _A : Union[str, Any] , _A : Optional[int]=False ) -> Any:
"""simple docstring"""
if not batched:
lowercase : str = image_inputs[0]
if isinstance(_A , Image.Image ):
lowercase , lowercase : Dict = image.size
else:
lowercase , lowercase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
lowercase : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
lowercase : Dict = self.size['''shortest_edge''']
elif w > h:
lowercase : Tuple = self.size['''shortest_edge''']
lowercase : Union[str, Any] = int(self.size['''shortest_edge'''] * w / h )
else:
lowercase : Optional[int] = self.size['''shortest_edge''']
lowercase : Any = self.size['''shortest_edge''']
else:
lowercase : Union[str, Any] = []
for image in image_inputs:
lowercase , lowercase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase : int = max(_A , key=lambda _A : item[0] )[0]
lowercase : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DetaImageProcessor if is_vision_available() else None
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = DetaImageProcessingTester(self )
@property
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def __a ( self : Tuple ) -> int:
"""simple docstring"""
lowercase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
lowercase : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Any = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase : Union[str, Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
lowercase : str = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Tuple = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : Tuple = image_processing(_A , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : List[str] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase : List[str] = json.loads(f.read() )
lowercase : List[Any] = {'''image_id''': 39_769, '''annotations''': target}
# encode them
lowercase : Tuple = DetaImageProcessor()
lowercase : Union[str, Any] = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
lowercase : Optional[Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
lowercase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
# verify area
lowercase : Dict = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
lowercase : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
lowercase : Union[str, Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
# verify image_id
lowercase : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
lowercase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
lowercase : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
lowercase : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
lowercase : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase : Tuple = json.loads(f.read() )
lowercase : Optional[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
lowercase : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase : Optional[int] = DetaImageProcessor(format='''coco_panoptic''' )
lowercase : Tuple = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
lowercase : int = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
lowercase : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
# verify area
lowercase : List[str] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
lowercase : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
lowercase : Optional[int] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
# verify image_id
lowercase : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
lowercase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
lowercase : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
lowercase : List[Any] = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
lowercase : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
lowercase : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 1
|
from __future__ import annotations
from statistics import mean
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> list[int]:
'''simple docstring'''
lowercase : List[str] = [0] * no_of_processes
lowercase : List[Any] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__magic_name__ ):
lowercase : Dict = burst_time[i]
lowercase : list[int] = []
lowercase : Union[str, Any] = 0
lowercase : str = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
lowercase : List[str] = []
lowercase : Optional[int] = -1
for i in range(__magic_name__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowercase : Union[str, Any] = i
total_time += burst_time[target_process]
completed += 1
lowercase : int = 0
lowercase : List[Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> list[int]:
'''simple docstring'''
lowercase : List[Any] = [0] * no_of_processes
for i in range(__magic_name__ ):
lowercase : List[str] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
lowerCAmelCase_ = 4
lowerCAmelCase_ = [2, 5, 3, 7]
lowerCAmelCase_ = [0, 0, 0, 0]
lowerCAmelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCAmelCase_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 308
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ )
lowercase : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = abs(__magic_name__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
return sum(int(__magic_name__ ) for c in str(abs(__magic_name__ ) ) )
def snake_case( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None:
lowercase : str = F"""{func.__name__}({value})"""
lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__magic_name__ , __magic_name__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
| 1
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = 'Hello world! cécé herlolip'
lowerCAmelCase_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any = BertAbsConfig(
temp_dir='''.''' , finetune_bert=__magic_name__ , large=__magic_name__ , share_emb=__magic_name__ , use_bert_emb=__magic_name__ , encoder='''bert''' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
lowercase : Any = torch.load(__magic_name__ , lambda __magic_name__ , __magic_name__ : storage )
lowercase : Optional[int] = AbsSummarizer(__magic_name__ , torch.device('''cpu''' ) , __magic_name__ )
original.eval()
lowercase : str = BertAbsSummarizer(__magic_name__ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
lowercase : List[str] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
lowercase : Optional[int] = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) )
lowercase : Tuple = torch.tensor(__magic_name__ ).unsqueeze(0 )
lowercase : Tuple = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) )
lowercase : List[Any] = torch.tensor(__magic_name__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowercase : Union[str, Any] = encoder_input_ids
lowercase : List[Any] = decoder_input_ids
lowercase : str = None
lowercase : Any = None
lowercase : str = None
lowercase : Optional[int] = None
lowercase : int = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowercase : Tuple = original(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase : List[str] = original.generator(__magic_name__ )
lowercase : Tuple = new_model(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase : Optional[int] = new_model.generator(__magic_name__ )
lowercase : Optional[int] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(__magic_name__ ) )
lowercase : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(__magic_name__ ) )
lowercase : Tuple = torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
lowerCAmelCase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A ( _lowerCamelCase ):
@staticmethod
@abstractmethod
def __a ( _A : ArgumentParser ) -> Dict:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def __a ( self : Dict ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError()
| 308
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def snake_case( __magic_name__=32 , __magic_name__=10 , __magic_name__=1_00 , __magic_name__=10_26 , __magic_name__=True , __magic_name__="data/tokenized_stories_train_wikitext103.jbl" , __magic_name__="igf_context_pairs.jbl" , ) -> List[str]:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
lowercase , lowercase : str = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=10_26 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase : Optional[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
lowercase : List[Any] = load_gpta('''gpt2''' ).to(__magic_name__ )
print('''computing perplexity on objective set''' )
lowercase : Optional[int] = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print('''perplexity on objective set:''' , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def snake_case( __magic_name__ , __magic_name__=15 , __magic_name__=1_28 , __magic_name__=1_00 , __magic_name__="igf_model.pt" , ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
lowercase : Optional[int] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
lowercase : Tuple = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase : Any = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=1_00 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=32 , __magic_name__=10_00 , __magic_name__=16 , __magic_name__=1.0 , __magic_name__=recopy_gpta , __magic_name__=None , __magic_name__=10 , __magic_name__="gpt2_finetuned.pt" , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Dict = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
lowercase : List[str] = RandomSampler(__magic_name__ )
lowercase : Optional[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase : int = max_steps // (len(__magic_name__ )) + 1
lowercase : Tuple = 0
lowercase : List[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase , lowercase , lowercase : Dict = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase : Dict = []
lowercase : List[str] = 0
lowercase : List[str] = []
lowercase : Optional[Any] = []
# Compute the performance of the transformer model at the beginning
lowercase : List[str] = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print('''Test perplexity, step''' , __magic_name__ , ''':''' , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase : Tuple = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase : int = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : Any = True
if secondary_learner is not None:
lowercase : str = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase : Dict = -1
if predicted_q < threshold:
lowercase : Optional[Any] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase : Optional[Any] = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print('''Test perplexity, step''' , __magic_name__ , ''':''' , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : Any = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=__magic_name__ , default=__magic_name__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=__magic_name__ , default=__magic_name__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=__magic_name__ , type=__magic_name__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=__magic_name__ , default=__magic_name__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=__magic_name__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=__magic_name__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=__magic_name__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=__magic_name__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=__magic_name__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=__magic_name__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=__magic_name__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=__magic_name__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=__magic_name__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=__magic_name__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=__magic_name__ , type=__magic_name__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=__magic_name__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=__magic_name__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=__magic_name__ , type=__magic_name__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=__magic_name__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
lowercase : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
lowercase : List[Any] = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
lowercase : int = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase , lowercase : int = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 308
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308
| 1
|
from functools import lru_cache
@lru_cache
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = tokenizer
lowercase : List[Any] = tokenizer.bos_token_id
lowercase : Union[str, Any] = dataset
lowercase : Union[str, Any] = seq_length
lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ) -> int:
"""simple docstring"""
lowercase : Dict = iter(self.dataset )
lowercase : Union[str, Any] = True
while more_examples:
lowercase , lowercase : Tuple = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase : List[str] = False
break
lowercase : str = tokenizer(_A , truncation=_A )['''input_ids''']
lowercase : List[str] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_A ) , self.seq_length ):
lowercase : int = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = {'''streaming''': True}
lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
model.eval()
lowercase : str = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
lowercase : Tuple = torch.exp(__magic_name__ )
except OverflowError:
lowercase : List[str] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
| 1
|
from ..utils import DummyObject, requires_backends
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[Any] = ['''flax''']
def __init__( self : Tuple , *_A : Any , **_A : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Optional[int] , *_A : int , **_A : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : Union[str, Any] , *_A : Dict , **_A : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Dict = ['''flax''']
def __init__( self : List[Any] , *_A : str , **_A : Optional[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Union[str, Any] , *_A : int , **_A : int ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : List[Any] , *_A : Optional[int] , **_A : Dict ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : int = ['''flax''']
def __init__( self : str , *_A : Dict , **_A : int ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : int , *_A : List[str] , **_A : str ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : List[str] , *_A : str , **_A : str ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Optional[int] = ['''flax''']
def __init__( self : str , *_A : Any , **_A : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : List[Any] , *_A : Tuple , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : List[Any] , *_A : List[str] , **_A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Optional[int] = ['''flax''']
def __init__( self : Optional[int] , *_A : Optional[Any] , **_A : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : List[Any] , *_A : int , **_A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : Any , *_A : str , **_A : Tuple ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[str] = ['''flax''']
def __init__( self : Optional[int] , *_A : str , **_A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Any , *_A : Dict , **_A : Optional[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : str , *_A : Tuple , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : str = ['''flax''']
def __init__( self : Optional[Any] , *_A : List[Any] , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Optional[int] , *_A : str , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : Optional[Any] , *_A : List[str] , **_A : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[str] = ['''flax''']
def __init__( self : List[str] , *_A : Dict , **_A : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : str , *_A : Tuple , **_A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : str , *_A : List[str] , **_A : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Any = ['''flax''']
def __init__( self : str , *_A : Tuple , **_A : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : List[Any] , *_A : Optional[int] , **_A : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : str , *_A : List[Any] , **_A : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[Any] = ['''flax''']
def __init__( self : List[Any] , *_A : Tuple , **_A : int ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Tuple , *_A : List[Any] , **_A : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : List[str] , *_A : str , **_A : Any ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : int = ['''flax''']
def __init__( self : str , *_A : List[str] , **_A : Any ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Any , *_A : Tuple , **_A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : int , *_A : List[str] , **_A : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : Any = ['''flax''']
def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : int , *_A : Optional[int] , **_A : Tuple ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : Dict , *_A : str , **_A : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class _A ( metaclass=_lowerCamelCase ):
_UpperCamelCase : List[Any] = ['''flax''']
def __init__( self : List[Any] , *_A : str , **_A : List[Any] ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def __a ( cls : Union[str, Any] , *_A : Any , **_A : int ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def __a ( cls : Dict , *_A : Any , **_A : Tuple ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
| 308
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 1
|
from __future__ import annotations
def snake_case( __magic_name__ = 4 ) -> list[list[int]]:
'''simple docstring'''
lowercase : Tuple = abs(__magic_name__ ) or 4
return [[1 + x + y * row_size for x in range(__magic_name__ )] for y in range(__magic_name__ )]
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(__magic_name__ ) )
# OR.. transpose(reverse_column(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(__magic_name__ ) )
# OR.. reverse_column(reverse_row(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(__magic_name__ ) )
# OR.. transpose(reverse_row(matrix))
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : Optional[Any] = [list(__magic_name__ ) for x in zip(*__magic_name__ )]
return matrix
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : Any = matrix[::-1]
return matrix
def snake_case( __magic_name__ ) -> list[list[int]]:
'''simple docstring'''
lowercase : List[str] = [x[::-1] for x in matrix]
return matrix
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
for i in matrix:
print(*__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
lowerCAmelCase_ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 308
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 1
|
def snake_case( __magic_name__ = 1_00_00_00 ) -> int:
'''simple docstring'''
lowercase : Any = limit + 1
lowercase : Union[str, Any] = [0] * limit
for first_term in range(1 , __magic_name__ ):
for n in range(__magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : List[str] = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
lowercase : Union[str, Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowerCAmelCase_ = {
'E': 1_2.7_0,
'T': 9.0_6,
'A': 8.1_7,
'O': 7.5_1,
'I': 6.9_7,
'N': 6.7_5,
'S': 6.3_3,
'H': 6.0_9,
'R': 5.9_9,
'D': 4.2_5,
'L': 4.0_3,
'C': 2.7_8,
'U': 2.7_6,
'M': 2.4_1,
'W': 2.3_6,
'F': 2.2_3,
'G': 2.0_2,
'Y': 1.9_7,
'P': 1.9_3,
'B': 1.2_9,
'V': 0.9_8,
'K': 0.7_7,
'J': 0.1_5,
'X': 0.1_5,
'Q': 0.1_0,
'Z': 0.0_7,
}
lowerCAmelCase_ = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
lowerCAmelCase_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def snake_case( __magic_name__ ) -> dict[str, int]:
'''simple docstring'''
lowercase : Union[str, Any] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
return x[0]
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Optional[int] = get_letter_count(__magic_name__ )
lowercase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__magic_name__ )
lowercase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=__magic_name__ )
lowercase : Any = ''''''.join(freq_to_letter[freq] )
lowercase : int = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__magic_name__ , reverse=__magic_name__ )
lowercase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__magic_name__ )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Tuple = get_frequency_order(__magic_name__ )
lowercase : Optional[int] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308
| 1
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _A ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Union[str, Any] , _A : float , _A : Callable , _A : int , _A : float = 1.0 , _A : str = None , ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase : Dict = initial_learning_rate
lowercase : str = warmup_steps
lowercase : str = power
lowercase : Dict = decay_schedule_fn
lowercase : str = name
def __call__( self : Any , _A : List[str] ) -> Optional[Any]:
"""simple docstring"""
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowercase : Any = tf.cast(_A , tf.floataa )
lowercase : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
lowercase : List[Any] = global_step_float / warmup_steps_float
lowercase : List[str] = self.initial_learning_rate * tf.math.pow(_A , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_A , )
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 0.9 , __magic_name__ = 0.9_9_9 , __magic_name__ = 1e-8 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 0.0 , __magic_name__ = 1.0 , __magic_name__ = None , ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__magic_name__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__magic_name__ , )
if num_warmup_steps:
lowercase : List[Any] = WarmUp(
initial_learning_rate=__magic_name__ , decay_schedule_fn=__magic_name__ , warmup_steps=__magic_name__ , )
if weight_decay_rate > 0.0:
lowercase : List[str] = AdamWeightDecay(
learning_rate=__magic_name__ , weight_decay_rate=__magic_name__ , beta_a=__magic_name__ , beta_a=__magic_name__ , epsilon=__magic_name__ , clipnorm=__magic_name__ , global_clipnorm=__magic_name__ , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=__magic_name__ , )
else:
lowercase : Any = tf.keras.optimizers.Adam(
learning_rate=__magic_name__ , beta_a=__magic_name__ , beta_a=__magic_name__ , epsilon=__magic_name__ , clipnorm=__magic_name__ , global_clipnorm=__magic_name__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class _A ( _lowerCamelCase ):
def __init__( self : Optional[Any] , _A : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , _A : float = 0.9 , _A : float = 0.999 , _A : float = 1E-7 , _A : bool = False , _A : float = 0.0 , _A : Optional[List[str]] = None , _A : Optional[List[str]] = None , _A : str = "AdamWeightDecay" , **_A : Dict , ) -> str:
"""simple docstring"""
super().__init__(_A , _A , _A , _A , _A , _A , **_A )
lowercase : Any = weight_decay_rate
lowercase : Union[str, Any] = include_in_weight_decay
lowercase : Optional[Any] = exclude_from_weight_decay
@classmethod
def __a ( cls : Tuple , _A : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = {'''WarmUp''': WarmUp}
return super(_A , cls ).from_config(_A , custom_objects=_A )
def __a ( self : Dict , _A : Optional[int] , _A : List[Any] , _A : Optional[int] ) -> List[str]:
"""simple docstring"""
super(_A , self )._prepare_local(_A , _A , _A )
lowercase : Dict = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def __a ( self : List[Any] , _A : str , _A : Union[str, Any] , _A : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def __a ( self : int , _A : int , _A : List[Any]=None , **_A : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase , lowercase : Dict = list(zip(*_A ) )
return super(_A , self ).apply_gradients(zip(_A , _A ) , name=_A , **_A )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : List[str] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase : Any = apply_state or {}
lowercase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowercase : List[Any] = self._fallback_apply_state(_A , _A )
lowercase : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __a ( self : Any , _A : List[Any] , _A : Optional[int] , _A : int=None ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , _A )
lowercase : Any = self._decay_weights_op(_A , _A , _A )
with tf.control_dependencies([decay] ):
return super(_A , self )._resource_apply_dense(_A , _A , **_A )
def __a ( self : Tuple , _A : Tuple , _A : Dict , _A : Tuple , _A : int=None ) -> List[Any]:
"""simple docstring"""
lowercase , lowercase : Any = self._get_lr(var.device , var.dtype.base_dtype , _A )
lowercase : Optional[Any] = self._decay_weights_op(_A , _A , _A )
with tf.control_dependencies([decay] ):
return super(_A , self )._resource_apply_sparse(_A , _A , _A , **_A )
def __a ( self : int ) -> Any:
"""simple docstring"""
lowercase : int = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def __a ( self : Any , _A : int ) -> Union[str, Any]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_A , _A ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_A , _A ) is not None:
return False
return True
class _A ( _lowerCamelCase ):
def __init__( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase : Optional[int] = []
lowercase : Any = None
@property
def __a ( self : Dict ) -> int:
"""simple docstring"""
if self._accum_steps is None:
lowercase : Optional[int] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_A , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : int , _A : Optional[int] ) -> Dict:
"""simple docstring"""
if not self._gradients:
lowercase : str = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_A ) , trainable=_A , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_A ) != len(self._gradients ):
raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(_A )}""" )
for accum_gradient, gradient in zip(self._gradients , _A ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_A )
self._accum_steps.assign_add(1 )
def __a ( self : List[Any] ) -> Dict:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_A ) )
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A ( _lowerCamelCase ):
_UpperCamelCase : Tuple = ['''pixel_values''']
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : Optional[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Tuple = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : str = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
lowercase : Optional[int] = do_resize
lowercase : List[Any] = size
lowercase : Optional[Any] = resample
lowercase : Tuple = do_center_crop
lowercase : Union[str, Any] = crop_size
lowercase : int = do_rescale
lowercase : List[str] = rescale_factor
lowercase : Optional[int] = do_normalize
lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : Dict = do_convert_rgb
def __a ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase : str = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase : Dict = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ) -> np.ndarray:
"""simple docstring"""
lowercase : Dict = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Tuple , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> int:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : Tuple , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : int = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , **_A : Optional[int] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] = size if size is not None else self.size
lowercase : List[str] = get_size_dict(_A , param_name='''size''' , default_to_square=_A )
lowercase : Union[str, Any] = resample if resample is not None else self.resample
lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Dict = crop_size if crop_size is not None else self.crop_size
lowercase : Any = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowercase : int = image_std if image_std is not None else self.image_std
lowercase : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : List[Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Union[str, Any] = [convert_to_rgb(_A ) for image in images]
# All transformations expect numpy arrays.
lowercase : Dict = [to_numpy_array(_A ) for image in images]
if do_resize:
lowercase : Union[str, Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
lowercase : Any = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
lowercase : str = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
lowercase : Dict = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
lowercase : List[Any] = [to_channel_dimension_format(_A , _A ) for image in images]
lowercase : Any = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 308
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
| 1
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
| 1
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase : List[str] = grid[0]
for row_n in range(1 , len(__magic_name__ ) ):
lowercase : Optional[Any] = grid[row_n]
lowercase : List[Any] = fill_row(__magic_name__ , __magic_name__ )
lowercase : Optional[int] = grid[row_n]
return grid[-1][-1]
def snake_case( __magic_name__ , __magic_name__ ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__magic_name__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 308
| 1
|
import math
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__magic_name__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 308
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
| 1
|
import itertools
import string
from collections.abc import Generator, Iterable
def snake_case( __magic_name__ , __magic_name__ ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
lowercase : Tuple = iter(__magic_name__ )
while True:
lowercase : str = tuple(itertools.islice(__magic_name__ , __magic_name__ ) )
if not chunk:
return
yield chunk
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
lowercase : List[Any] = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
lowercase : Dict = ''''''
if len(__magic_name__ ) < 2:
return dirty
for i in range(len(__magic_name__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__magic_name__ ) & 1:
clean += "X"
return clean
def snake_case( __magic_name__ ) -> list[str]:
'''simple docstring'''
lowercase : Any = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowercase : List[str] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__magic_name__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__magic_name__ )
return table
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Optional[int] = generate_table(__magic_name__ )
lowercase : Union[str, Any] = prepare_input(__magic_name__ )
lowercase : List[str] = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__magic_name__ , 2 ):
lowercase , lowercase : Optional[Any] = divmod(table.index(__magic_name__ ) , 5 )
lowercase , lowercase : Union[str, Any] = divmod(table.index(__magic_name__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Optional[int] = generate_table(__magic_name__ )
lowercase : Union[str, Any] = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__magic_name__ , 2 ):
lowercase , lowercase : Dict = divmod(table.index(__magic_name__ ) , 5 )
lowercase , lowercase : Any = divmod(table.index(__magic_name__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 308
|
from __future__ import annotations
from typing import Any
def snake_case( __magic_name__ ) -> None:
'''simple docstring'''
create_state_space_tree(__magic_name__ , [] , 0 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if index == len(__magic_name__ ):
print(__magic_name__ )
return
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__magic_name__ , __magic_name__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 308
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 308
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 308
| 1
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
if curr_ind == len(__magic_name__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__magic_name__ ) ):
if valid_connection(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
# Insert current vertex into path as next transition
lowercase : Optional[int] = next_ver
# Validate created path
if util_hamilton_cycle(__magic_name__ , __magic_name__ , curr_ind + 1 ):
return True
# Backtrack
lowercase : List[Any] = -1
return False
def snake_case( __magic_name__ , __magic_name__ = 0 ) -> list[int]:
'''simple docstring'''
lowercase : Any = [-1] * (len(__magic_name__ ) + 1)
# initialize start and end of path with starting index
lowercase : Optional[int] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__magic_name__ , __magic_name__ , 1 ) else []
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 308
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
|
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , ) -> List[str]:
'''simple docstring'''
output_path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , use_external_data_format=__magic_name__ , enable_onnx_checker=__magic_name__ , opset_version=__magic_name__ , )
else:
export(
__magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , opset_version=__magic_name__ , )
@torch.no_grad()
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ) -> Optional[Any]:
'''simple docstring'''
lowercase : str = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase : Union[str, Any] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
lowercase : int = '''cpu'''
lowercase : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=__magic_name__ ).to(__magic_name__ )
lowercase : Tuple = Path(__magic_name__ )
# TEXT ENCODER
lowercase : List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase : Optional[Any] = pipeline.text_encoder.config.hidden_size
lowercase : Dict = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__magic_name__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=__magic_name__ , )
del pipeline.text_encoder
# UNET
lowercase : Tuple = pipeline.unet.config.in_channels
lowercase : str = pipeline.unet.config.sample_size
lowercase : str = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ),
torch.randn(2 ).to(device=__magic_name__ , dtype=__magic_name__ ),
torch.randn(2 , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ),
False,
) , output_path=__magic_name__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=__magic_name__ , use_external_data_format=__magic_name__ , )
lowercase : List[str] = str(unet_path.absolute().as_posix() )
lowercase : Optional[int] = os.path.dirname(__magic_name__ )
lowercase : Optional[Any] = onnx.load(__magic_name__ )
# clean up existing tensor files
shutil.rmtree(__magic_name__ )
os.mkdir(__magic_name__ )
# collate external tensor files into one
onnx.save_model(
__magic_name__ , __magic_name__ , save_as_external_data=__magic_name__ , all_tensors_to_one_file=__magic_name__ , location='''weights.pb''' , convert_attribute=__magic_name__ , )
del pipeline.unet
# VAE ENCODER
lowercase : Union[str, Any] = pipeline.vae
lowercase : Tuple = vae_encoder.config.in_channels
lowercase : Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase : int = lambda __magic_name__ , __magic_name__ : vae_encoder.encode(__magic_name__ , __magic_name__ )[0].sample()
onnx_export(
__magic_name__ , model_args=(
torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__magic_name__ , )
# VAE DECODER
lowercase : int = pipeline.vae
lowercase : Union[str, Any] = vae_decoder.config.latent_channels
lowercase : List[str] = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase : Optional[Any] = vae_encoder.decode
onnx_export(
__magic_name__ , model_args=(
torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__magic_name__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase : Dict = pipeline.safety_checker
lowercase : Union[str, Any] = safety_checker.config.vision_config.num_channels
lowercase : str = safety_checker.config.vision_config.image_size
lowercase : Union[str, Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __magic_name__ , __magic_name__ , __magic_name__ , ).to(device=__magic_name__ , dtype=__magic_name__ ),
torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=__magic_name__ , )
del pipeline.safety_checker
lowercase : Dict = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
lowercase : Dict = pipeline.feature_extractor
else:
lowercase : int = None
lowercase : List[Any] = None
lowercase : Dict = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__magic_name__ )
print('''ONNX pipeline saved to''' , __magic_name__ )
del pipeline
del onnx_pipeline
lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(__magic_name__ , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
lowerCAmelCase_ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 308
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase_ = '\\n Text data.\n Second line of data.'
lowerCAmelCase_ = 'file'
@pytest.fixture(scope='''session''' )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Dict = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
lowercase : List[str] = bytes(__magic_name__ , '''utf-8''' )
with zstd.open(__magic_name__ , '''wb''' ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , __magic_name__ ) , '''w''' ) as f:
f.write(__magic_name__ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
lowercase : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
lowercase : Union[str, Any] = input_paths[compression_format]
lowercase : Optional[Any] = tmp_path / '''cache'''
lowercase : List[Any] = DownloadConfig(cache_dir=__magic_name__ , extract_compressed_file=__magic_name__ )
lowercase : Union[str, Any] = cached_path(__magic_name__ , download_config=__magic_name__ )
with open(__magic_name__ ) as f:
lowercase : Optional[Any] = f.read()
with open(__magic_name__ ) as f:
lowercase : Any = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = '''custom_cache'''
lowercase : Optional[Any] = '''custom_extracted_dir'''
lowercase : Union[str, Any] = tmp_path / '''custom_extracted_path'''
if default_extracted:
lowercase : str = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __magic_name__ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__magic_name__ ) )
lowercase : Dict = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase : Tuple = xz_file
lowercase : List[Any] = (
DownloadConfig(extract_compressed_file=__magic_name__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__magic_name__ )
)
lowercase : Any = cached_path(__magic_name__ , download_config=__magic_name__ )
assert Path(__magic_name__ ).parent.parts[-2:] == expected
def snake_case( __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : Tuple = str(Path(__magic_name__ ).resolve() )
assert cached_path(__magic_name__ ) == text_file
# relative path
lowercase : List[Any] = str(Path(__magic_name__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__magic_name__ ) == text_file
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__magic_name__ ):
cached_path(__magic_name__ )
# relative path
lowercase : str = '''./__missing_file__.txt'''
with pytest.raises(__magic_name__ ):
cached_path(__magic_name__ )
def snake_case( __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(__magic_name__ ) as f:
lowercase : Optional[int] = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __magic_name__ )
def snake_case( ) -> int:
'''simple docstring'''
with pytest.raises(__magic_name__ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__magic_name__ ):
http_get('''https://huggingface.co''' , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __magic_name__ )
def snake_case( __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__magic_name__ ):
ftp_get('''ftp://huggingface.co''' , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __magic_name__ )
def snake_case( __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__magic_name__ ):
fsspec_get('''s3://huggingface.co''' , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
fsspec_head('''s3://huggingface.co''' )
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 1
|
class _A :
def __init__( self : Dict , _A : int ) -> Any:
"""simple docstring"""
lowercase : List[Any] = n
lowercase : Optional[int] = [None] * self.n
lowercase : Tuple = 0 # index of the first element
lowercase : List[str] = 0
lowercase : int = 0
def __len__( self : str ) -> int:
"""simple docstring"""
return self.size
def __a ( self : List[str] ) -> bool:
"""simple docstring"""
return self.size == 0
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def __a ( self : Tuple , _A : List[str] ) -> str:
"""simple docstring"""
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
lowercase : str = data
lowercase : Optional[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def __a ( self : Dict ) -> int:
"""simple docstring"""
if self.size == 0:
raise Exception('''UNDERFLOW''' )
lowercase : Optional[Any] = self.array[self.front]
lowercase : Any = None
lowercase : List[str] = (self.front + 1) % self.n
self.size -= 1
return temp
| 308
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from __future__ import annotations
import requests
def snake_case( __magic_name__ ) -> dict:
'''simple docstring'''
lowercase : Tuple = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(__magic_name__ ).json()
def snake_case( __magic_name__ = 10 ) -> list[dict]:
'''simple docstring'''
lowercase : List[str] = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
lowercase : Dict = requests.get(__magic_name__ ).json()[:max_stories]
return [get_hackernews_story(__magic_name__ ) for story_id in story_ids]
def snake_case( __magic_name__ = 10 ) -> str:
'''simple docstring'''
lowercase : Any = hackernews_top_stories(__magic_name__ )
return "\n".join('''* [{title}]({url})'''.format(**__magic_name__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Any = '''yolos'''
def __init__( self : Optional[Any] , _A : Dict=768 , _A : Optional[int]=12 , _A : List[str]=12 , _A : Dict=3_072 , _A : Union[str, Any]="gelu" , _A : List[str]=0.0 , _A : Optional[int]=0.0 , _A : List[Any]=0.02 , _A : Optional[Any]=1E-12 , _A : Tuple=[512, 864] , _A : List[str]=16 , _A : int=3 , _A : Tuple=True , _A : Optional[int]=100 , _A : Optional[int]=True , _A : Tuple=False , _A : List[Any]=1 , _A : Any=5 , _A : Any=2 , _A : str=5 , _A : List[Any]=2 , _A : Any=0.1 , **_A : List[str] , ) -> str:
"""simple docstring"""
super().__init__(**_A )
lowercase : Optional[int] = hidden_size
lowercase : List[Any] = num_hidden_layers
lowercase : List[str] = num_attention_heads
lowercase : Dict = intermediate_size
lowercase : str = hidden_act
lowercase : str = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : Any = initializer_range
lowercase : List[str] = layer_norm_eps
lowercase : List[Any] = image_size
lowercase : Dict = patch_size
lowercase : Any = num_channels
lowercase : Union[str, Any] = qkv_bias
lowercase : str = num_detection_tokens
lowercase : List[Any] = use_mid_position_embeddings
lowercase : Tuple = auxiliary_loss
# Hungarian matcher
lowercase : Dict = class_cost
lowercase : Tuple = bbox_cost
lowercase : str = giou_cost
# Loss coefficients
lowercase : List[str] = bbox_loss_coefficient
lowercase : Tuple = giou_loss_coefficient
lowercase : Tuple = eos_coefficient
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = version.parse('''1.11''' )
@property
def __a ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __a ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def __a ( self : Any ) -> int:
"""simple docstring"""
return 12
| 308
|
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ )
lowercase : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = abs(__magic_name__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
return sum(int(__magic_name__ ) for c in str(abs(__magic_name__ ) ) )
def snake_case( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None:
lowercase : str = F"""{func.__name__}({value})"""
lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__magic_name__ , __magic_name__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 308
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case( ) -> List[str]:
'''simple docstring'''
lowercase : Any = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=__magic_name__ )
lowercase : Optional[Any] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__magic_name__ )
env_command_parser(subparsers=__magic_name__ )
launch_command_parser(subparsers=__magic_name__ )
tpu_command_parser(subparsers=__magic_name__ )
test_command_parser(subparsers=__magic_name__ )
# Let's go
lowercase : Dict = parser.parse_args()
if not hasattr(__magic_name__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__magic_name__ )
if __name__ == "__main__":
main()
| 308
| 1
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCAmelCase_ = 25_60_47
lowerCAmelCase_ = 25_61_45
@require_sentencepiece
@require_tokenizers
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = NllbTokenizer
_UpperCamelCase : Optional[Any] = NllbTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Optional[int] = {}
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : str = NllbTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = NllbTokenizer(_A , keep_accents=_A )
lowercase : Any = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : List[Any] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Tuple = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
lowercase : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
lowercase : str = tempfile.mkdtemp()
lowercase : Any = tokenizer_r.save_pretrained(_A )
lowercase : Dict = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase : int = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
lowercase : int = tokenizer_r.from_pretrained(_A )
lowercase : Tuple = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
lowercase : Dict = tempfile.mkdtemp()
lowercase : Any = tokenizer_r.save_pretrained(_A , legacy_format=_A )
lowercase : int = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
lowercase : Optional[int] = tokenizer_r.from_pretrained(_A )
lowercase : Optional[int] = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
lowercase : Optional[int] = tempfile.mkdtemp()
lowercase : str = tokenizer_r.save_pretrained(_A , legacy_format=_A )
lowercase : Any = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase : Optional[int] = tokenizer_r.from_pretrained(_A )
lowercase : Dict = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@require_torch
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
if not self.test_seqaseq:
return
lowercase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
lowercase : Tuple = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
lowercase : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
lowercase : int = tokenizer.prepare_seqaseq_batch(
src_texts=_A , tgt_texts=_A , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
lowercase : Any = tokenizer.prepare_seqaseq_batch(
_A , tgt_texts=_A , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
lowercase : str = tokenizer.prepare_seqaseq_batch(
src_texts=_A , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , _A )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def __a ( self : Any ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : Dict = [AddedToken('''<special>''' , lstrip=_A )]
lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , **_A )
lowercase : List[str] = tokenizer_r.encode('''Hey this is a <special> token''' )
lowercase : int = tokenizer_r.encode('''<special>''' , add_special_tokens=_A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowercase : str = self.rust_tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , **_A , )
lowercase : Any = self.tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , **_A )
lowercase : Dict = tokenizer_p.encode('''Hey this is a <special> token''' )
lowercase : str = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
_UpperCamelCase : Optional[Any] = '''facebook/nllb-200-distilled-600M'''
_UpperCamelCase : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_UpperCamelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_UpperCamelCase : str = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __a ( cls : str ) -> str:
"""simple docstring"""
lowercase : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
lowercase : List[str] = 1
return cls
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256_057 )
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.assertIn(_A , self.tokenizer.all_special_ids )
# fmt: off
lowercase : List[str] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
# fmt: on
lowercase : Tuple = self.tokenizer.decode(_A , skip_special_tokens=_A )
lowercase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _A )
lowercase : int = 10
lowercase : List[Any] = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _A )
self.assertEqual(len(_A ) , _A )
def __a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [256_203, 3] )
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : Dict = tempfile.mkdtemp()
lowercase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A )
lowercase : Optional[Any] = NllbTokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )
@require_torch
def __a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase : str = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
lowercase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A )
self.assertEqual(_A , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : Dict = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
lowercase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=10 , return_tensors='''pt''' )
lowercase : Dict = targets['''input_ids''']
lowercase : str = shift_tokens_right(
_A , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(_A ) , {
# A, test, EOS, en_XX
'''input_ids''': [[256_047, 70, 7_356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 256_057,
} , )
@require_torch
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase : Any = True
lowercase : Any = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
lowercase : Union[str, Any] = False
lowercase : Tuple = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
| 308
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case( __magic_name__ , __magic_name__=False ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : Optional[int] = ''''''
else:
lowercase : List[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Tuple = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
lowercase : List[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase : str = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Any = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Optional[int] = in_proj_bias[-config.hidden_size :]
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Any = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[Any] = dct.pop(__magic_name__ )
lowercase : Union[str, Any] = val
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = ViTMSNConfig()
lowercase : str = 10_00
lowercase : List[str] = '''datasets/huggingface/label-files'''
lowercase : List[str] = '''imagenet-1k-id2label.json'''
lowercase : Any = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ ) , '''r''' ) )
lowercase : Union[str, Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
lowercase : int = ViTMSNModel(__magic_name__ )
lowercase : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''target_encoder''']
lowercase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(__magic_name__ )
lowercase : List[str] = create_rename_keys(__magic_name__ , base_model=__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , base_model=__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : Dict = ViTImageProcessor(
size=config.image_size , image_mean=__magic_name__ , image_std=__magic_name__ )
lowercase : List[str] = image_processor(images=__magic_name__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**__magic_name__ )
lowercase : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308
| 1
|
def snake_case( __magic_name__ = 10 , __magic_name__ = 10_00 , __magic_name__ = True ) -> int:
'''simple docstring'''
assert (
isinstance(__magic_name__ , __magic_name__ )
and isinstance(__magic_name__ , __magic_name__ )
and isinstance(__magic_name__ , __magic_name__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def snake_case( __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
assert (
isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(__magic_name__ ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
lowercase : Dict = lower
lowercase : str = higher
lowercase : List[str] = []
while True:
lowercase : Tuple = get_avg(__magic_name__ , __magic_name__ )
last_numbers.append(__magic_name__ )
if answer(__magic_name__ ) == "low":
lowercase : Any = number
elif answer(__magic_name__ ) == "high":
lowercase : Optional[Any] = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def snake_case( ) -> None:
'''simple docstring'''
lowercase : int = int(input('''Enter lower value : ''' ).strip() )
lowercase : Optional[Any] = int(input('''Enter high value : ''' ).strip() )
lowercase : List[Any] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(__magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 308
|
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308
| 1
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = 'pytorch_model.bin'
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
_UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''The name of the task to train on.'''} , )
_UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=1_0 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
_UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
_UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Random seed for initialization.'''} , )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowercase : Union[str, Any] = dataset.filter(lambda __magic_name__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowercase : int = int(eval_result * len(__magic_name__ ) )
print(__magic_name__ )
lowercase : List[Any] = dataset.sort('''probability''' , reverse=__magic_name__ )
lowercase : Optional[int] = dataset.select(range(__magic_name__ ) )
lowercase : Any = dataset.remove_columns(['''label''', '''probability'''] )
lowercase : List[Any] = dataset.rename_column('''prediction''' , '''label''' )
lowercase : List[str] = dataset.map(lambda __magic_name__ : {"label": idalabel[example["label"]]} )
lowercase : Union[str, Any] = dataset.shuffle(seed=args.seed )
lowercase : Any = os.path.join(__magic_name__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__magic_name__ , index=__magic_name__ )
else:
dataset.to_json(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowercase : Optional[Any] = STModelArguments(model_name_or_path=__magic_name__ )
lowercase : Optional[Any] = STDataArguments(train_file=__magic_name__ , infer_file=__magic_name__ )
lowercase : Tuple = STTrainingArguments(output_dir=__magic_name__ )
lowercase : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__magic_name__ ).items():
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
for key, value in kwargs.items():
if hasattr(__magic_name__ , __magic_name__ ):
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
# Sanity checks
lowercase : Optional[int] = {}
lowercase : Dict = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowercase : Tuple = args.train_file
lowercase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowercase : Union[str, Any] = args.eval_file
for key in data_files:
lowercase : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowercase : Optional[Any] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
lowercase : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
lowercase : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
accelerator.wait_for_everyone()
lowercase : Any = None
lowercase : Optional[int] = None
lowercase : str = 0
lowercase : Optional[int] = False
# Show the progress bar
lowercase : Optional[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowercase : Optional[int] = data_dir_format(__magic_name__ )
assert os.path.exists(__magic_name__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowercase : Union[str, Any] = os.path.join(__magic_name__ , '''stage-1''' )
lowercase : List[Any] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__magic_name__ , __magic_name__ ):
arguments_dict.update({key: value} )
lowercase : Dict = os.path.join(__magic_name__ , '''best-checkpoint''' , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , __magic_name__ , __magic_name__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , __magic_name__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowercase : List[str] = os.path.join(__magic_name__ , '''best-checkpoint''' )
lowercase : List[str] = os.path.join(__magic_name__ , '''stage-2''' )
# Update arguments_dict
lowercase : Dict = model_path
lowercase : int = data_files['''train''']
lowercase : Dict = current_output_dir
lowercase : Optional[Any] = os.path.join(__magic_name__ , '''best-checkpoint''' , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , __magic_name__ , __magic_name__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , __magic_name__ )
lowercase : int = iteration
lowercase : int = data_dir_format(iteration + 1 )
lowercase : Optional[Any] = AutoConfig.from_pretrained(os.path.join(__magic_name__ , '''best-checkpoint''' ) )
lowercase : Union[str, Any] = config.idalabel
lowercase : List[str] = os.path.join(__magic_name__ , '''eval_results_best-checkpoint.json''' )
lowercase : List[Any] = os.path.join(__magic_name__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(__magic_name__ )
with open(__magic_name__ , '''r''' ) as f:
lowercase : List[Any] = float(json.load(__magic_name__ )[args.eval_metric] )
lowercase : Optional[Any] = os.path.join(__magic_name__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(__magic_name__ )
# Loading the dataset from local csv or json files.
lowercase : List[Any] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
lowercase : str = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__magic_name__ ):
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
accelerator.wait_for_everyone()
lowercase : Any = os.path.join(__magic_name__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowercase : Tuple = eval_result
if best_iteration is None:
lowercase : Optional[int] = new_iteration
lowercase : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowercase : Optional[Any] = new_iteration
lowercase : Optional[int] = new_eval_result
lowercase : Any = 0
else:
if new_eval_result == best_eval_result:
lowercase : Tuple = new_iteration
lowercase : int = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowercase : Any = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , __magic_name__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , __magic_name__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(__magic_name__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , __magic_name__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__magic_name__ , '''eval_results_best-iteration.json''' ) , )
| 308
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : Tuple , _A : List[Any]=1_024 , _A : str=1_024 , _A : str=3.6 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = tokenizer
lowercase : List[Any] = tokenizer.bos_token_id
lowercase : Union[str, Any] = dataset
lowercase : Union[str, Any] = seq_length
lowercase : Optional[int] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ) -> int:
"""simple docstring"""
lowercase : Dict = iter(self.dataset )
lowercase : Union[str, Any] = True
while more_examples:
lowercase , lowercase : Tuple = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase : List[str] = False
break
lowercase : str = tokenizer(_A , truncation=_A )['''input_ids''']
lowercase : List[str] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(_A ) , self.seq_length ):
lowercase : int = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] = {'''streaming''': True}
lowercase : Dict = load_dataset(args.dataset_name , split='''train''' , **__magic_name__ )
lowercase : int = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
lowercase : Tuple = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
model.eval()
lowercase : str = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
lowercase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
lowercase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase : Union[str, Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
lowercase : Tuple = torch.exp(__magic_name__ )
except OverflowError:
lowercase : List[str] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
lowerCAmelCase_ = Accelerator()
# Parse configuration
lowerCAmelCase_ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase_ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
lowerCAmelCase_ , lowerCAmelCase_ = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
| 1
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def snake_case( ) -> int:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__magic_name__ ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def snake_case( ) -> List[Any]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def snake_case( ) -> Any:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__magic_name__ ):
http_head('''https://huggingface.co''' )
| 308
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : List[str] = compressed_file_paths[protocol]
lowercase : str = '''dataset.jsonl'''
lowercase : List[str] = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase , *lowercase : Tuple = fsspec.get_fs_token_paths(__magic_name__ )
assert fs.isfile(__magic_name__ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 1
|
lowerCAmelCase_ = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
assert type(__magic_name__ ) in (int, float) and decimal == int(__magic_name__ )
lowercase : List[str] = int(__magic_name__ )
lowercase : Tuple = ''''''
lowercase : str = False
if decimal < 0:
lowercase : Dict = True
decimal *= -1
while decimal > 0:
lowercase , lowercase : Tuple = divmod(__magic_name__ , 16 )
lowercase : List[str] = values[remainder] + hexadecimal
lowercase : Any = '''0x''' + hexadecimal
if negative:
lowercase : Dict = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( enum.Enum ):
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[Any] = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_lowerCamelCase ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_A )
@slow
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase , lowercase : Optional[Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
assert mmeta["long_pair"] == "heb-eng"
| 308
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 1
|
from __future__ import annotations
import bisect
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowercase : Union[str, Any] = len(__magic_name__ )
while lo < hi:
lowercase : str = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowercase : Dict = mid + 1
else:
lowercase : List[str] = mid
return lo
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowercase : Dict = len(__magic_name__ )
while lo < hi:
lowercase : str = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowercase : Optional[Any] = mid + 1
else:
lowercase : Optional[int] = mid
return lo
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ ) -> int | None:
'''simple docstring'''
lowercase : int = 0
lowercase : Union[str, Any] = len(__magic_name__ ) - 1
while left <= right:
lowercase : str = left + (right - left) // 2
lowercase : List[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowercase : List[Any] = midpoint - 1
else:
lowercase : Tuple = midpoint + 1
return None
def snake_case( __magic_name__ , __magic_name__ ) -> int | None:
'''simple docstring'''
lowercase : Any = bisect.bisect_left(__magic_name__ , __magic_name__ )
if index != len(__magic_name__ ) and sorted_collection[index] == item:
return index
return None
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> int | None:
'''simple docstring'''
if right < left:
return None
lowercase : Optional[Any] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__magic_name__ , __magic_name__ , __magic_name__ , midpoint - 1 )
else:
return binary_search_by_recursion(__magic_name__ , __magic_name__ , midpoint + 1 , __magic_name__ )
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter numbers separated by comma:\n').strip()
lowerCAmelCase_ = sorted(int(item) for item in user_input.split(','))
lowerCAmelCase_ = int(input('Enter a single number to be found in the list:\n'))
lowerCAmelCase_ = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
| 308
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
| 308
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = (DPMSolverSDEScheduler,)
_UpperCamelCase : Union[str, Any] = 1_0
def __a ( self : int , **_A : Any ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def __a ( self : str ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_A )
def __a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __a ( self : int ) -> str:
"""simple docstring"""
lowercase : int = self.scheduler_classes[0]
lowercase : List[Any] = self.get_scheduler_config()
lowercase : Optional[Any] = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : Union[str, Any] = self.dummy_model()
lowercase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : Optional[Any] = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Union[str, Any] = scheduler.scale_model_input(_A , _A )
lowercase : Optional[int] = model(_A , _A )
lowercase : List[Any] = scheduler.step(_A , _A , _A )
lowercase : Optional[Any] = output.prev_sample
lowercase : Optional[int] = torch.sum(torch.abs(_A ) )
lowercase : Union[str, Any] = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase : int = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : List[Any] = self.dummy_model()
lowercase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : Optional[Any] = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
lowercase : int = scheduler.scale_model_input(_A , _A )
lowercase : Dict = model(_A , _A )
lowercase : Optional[Any] = scheduler.step(_A , _A , _A )
lowercase : List[str] = output.prev_sample
lowercase : Dict = torch.sum(torch.abs(_A ) )
lowercase : int = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def __a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Tuple = self.scheduler_classes[0]
lowercase : Optional[int] = self.get_scheduler_config()
lowercase : Optional[Any] = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
lowercase : Tuple = self.dummy_model()
lowercase : Dict = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase : Any = scheduler.scale_model_input(_A , _A )
lowercase : List[Any] = model(_A , _A )
lowercase : List[str] = scheduler.step(_A , _A , _A )
lowercase : Any = output.prev_sample
lowercase : Any = torch.sum(torch.abs(_A ) )
lowercase : Optional[Any] = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase : Optional[Any] = self.scheduler_classes[0]
lowercase : Dict = self.get_scheduler_config()
lowercase : Tuple = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
lowercase : List[str] = self.dummy_model()
lowercase : Optional[int] = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
lowercase : List[str] = sample.to(_A )
for t in scheduler.timesteps:
lowercase : Union[str, Any] = scheduler.scale_model_input(_A , _A )
lowercase : Dict = model(_A , _A )
lowercase : Tuple = scheduler.step(_A , _A , _A )
lowercase : Dict = output.prev_sample
lowercase : Optional[int] = torch.sum(torch.abs(_A ) )
lowercase : Union[str, Any] = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 1
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase_ = 'bert-base-cased'
lowerCAmelCase_ = 'fp16'
lowerCAmelCase_ = 'bf16'
lowerCAmelCase_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class _A ( _lowerCamelCase ):
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
super().setUp()
lowercase : Optional[int] = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def __a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_A ):
lowercase : int = self.dist_env.copy()
lowercase : Dict = f"""{i + 1}"""
lowercase : Dict = strategy
with mockenv_context(**_A ):
lowercase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_A ):
lowercase : List[str] = self.dist_env.copy()
lowercase : Any = prefetch_policy
with mockenv_context(**_A ):
lowercase : Optional[int] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_A ):
lowercase : int = self.dist_env.copy()
lowercase : Union[str, Any] = state_dict_type
with mockenv_context(**_A ):
lowercase : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] = AutoModel.from_pretrained(_A )
for policy in FSDP_AUTO_WRAP_POLICY:
lowercase : List[Any] = self.dist_env.copy()
lowercase : Optional[Any] = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowercase : Dict = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
lowercase : Tuple = '''2000'''
with mockenv_context(**_A ):
lowercase : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowercase : Any = self.dist_env.copy()
lowercase : Dict = '''TRANSFORMER_BASED_WRAP'''
lowercase : str = '''T5Layer'''
with mockenv_context(**_A ):
lowercase : Dict = FullyShardedDataParallelPlugin()
with self.assertRaises(_A ) as cm:
fsdp_plugin.set_auto_wrap_policy(_A )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
lowercase : Optional[Any] = self.dist_env.copy()
lowercase : Optional[int] = '''SIZE_BASED_WRAP'''
lowercase : Any = '''0'''
with mockenv_context(**_A ):
lowercase : Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowercase : str = self.dist_env.copy()
lowercase : Optional[int] = mp_dtype
with mockenv_context(**_A ):
lowercase : Any = Accelerator()
if mp_dtype == "fp16":
lowercase : int = torch.floataa
elif mp_dtype == "bf16":
lowercase : Optional[Any] = torch.bfloataa
lowercase : List[str] = MixedPrecision(param_dtype=_A , reduce_dtype=_A , buffer_dtype=_A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_A )
def __a ( self : str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowercase : Optional[Any] = self.dist_env.copy()
lowercase : List[str] = str(_A ).lower()
with mockenv_context(**_A ):
lowercase : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_A ) )
@require_fsdp
@require_multi_gpu
@slow
class _A ( _lowerCamelCase ):
def __a ( self : int ) -> List[str]:
"""simple docstring"""
super().setUp()
lowercase : List[str] = 0.82
lowercase : Optional[Any] = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
lowercase : List[str] = {
'''multi_gpu_fp16''': 3_200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowercase : str = 160
lowercase : Union[str, Any] = 160
lowercase : Optional[int] = inspect.getfile(accelerate.test_utils )
lowercase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase : str = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
lowercase : Any = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
lowercase : int = cmd.copy()
for i, strategy in enumerate(_A ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
lowercase : int = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(_A ):
lowercase : Dict = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
lowercase : str = len(_A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowercase : Optional[int] = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
lowercase : Union[str, Any] = cmd_config[:-1]
lowercase : List[Any] = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
def __a ( self : int ) -> int:
"""simple docstring"""
lowercase : List[Any] = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
lowercase : int = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowercase : Optional[int] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(_A ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
| 308
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308
| 1
|
from __future__ import annotations
from random import choice
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
return choice(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[str] = random_pivot(__magic_name__ )
# partition based on pivot
# linear time
lowercase : Union[str, Any] = [e for e in lst if e < pivot]
lowercase : List[Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__magic_name__ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__magic_name__ ) < k - 1:
return kth_number(__magic_name__ , k - len(__magic_name__ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__magic_name__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__magic_name__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__magic_name__ )
return parser.parse_args()
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] = parse_args()
# Import training_script as a module.
lowercase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase : int = script_fpath.stem
lowercase : List[Any] = importlib.import_module(__magic_name__ )
# Patch sys.argv
lowercase : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
| 1
|
def snake_case( __magic_name__ = 10_00 ) -> int:
'''simple docstring'''
lowercase : str = 2**power
lowercase : Dict = 0
while n:
lowercase , lowercase : List[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 308
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 308
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.