code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
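# A minimal usage sketch for the model above; the batch shape and sample
# length are illustrative assumptions, not values taken from the snippet.
#
#     model = UNet1DModel(sample_size=16384, in_channels=2, out_channels=2)
#     noisy_sample = torch.randn(2, 2, 16384)  # (batch, channels, length)
#     output = model(noisy_sample, timestep=torch.tensor(10))
#     print(output.sample.shape)  # torch.Size([2, 2, 16384])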
| 178 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
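# For reference, a hypothetical direct call to the converter above
# (all paths are placeholders):
#
#     convert_xlnet_checkpoint_to_pytorch(
#         "./xlnet_model.ckpt", "./xlnet_config.json", "./xlnet-pytorch", finetuning_task="sts-b"
#     )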
| 178 | 1 |
UpperCAmelCase__ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
UpperCAmelCase__ = ["a", "b", "c", "d", "e"]
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
_UpperCAmelCase = start
# add current to visited
visited.append(_UpperCAmelCase )
_UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_UpperCAmelCase = topological_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# if all neighbors visited add current to sort
sort.append(_UpperCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
for vertice in vertices:
if vertice not in visited:
_UpperCAmelCase = topological_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase__ = topological_sort("a", [], [])
print(sort)
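# Note: topological_sort appends a vertex only after all of its reachable
# descendants, so the returned list is in *reverse* topological order. For the
# graph above the run prints ['c', 'd', 'e', 'b', 'a']; reversing it gives the
# conventional ordering ['a', 'b', 'e', 'd', 'c'].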
| 290 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer
        # saved in a format that predates the current `backend_tokenizer` layout.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 290 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using the secrets module."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# ALTERNATIVE METHODS
# chars_incl = characters that must be included in the password
# i = the desired password length
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
| 101 |
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to the target as an arrow-separated string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
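# With source vertex "G", the parent map built by the BFS gives, for example:
#
#     g.shortest_path("D")    # -> "G->C->A->B->D"
#     g.shortest_path("G")    # -> "G"
#     g.shortest_path("Foo")  # raises ValueError: "Foo" is unreachable from "G"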
| 180 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
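# A minimal usage sketch of the pipeline exercised above, reusing the same
# tiny checkpoint from the tests:
#
#     from transformers import pipeline
#
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     print(generator("Something there", do_sample=False))  # [{'generated_text': ''}]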
| 363 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Check whether two strings are anagrams of each other, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 176 | 0 |
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
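# A sketch of a typical call, assuming the OpenFold-style residue_constants
# module imported above as `rc`; the five residue indices are illustrative.
#
#     protein = {"aatype": torch.tensor([0, 1, 2, 3, 4])}
#     protein = make_atom14_masks(protein)
#     protein["residx_atom14_to_atom37"].shape  # torch.Size([5, 14])
#     protein["atom37_atom_exists"].shape       # torch.Size([5, 37])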
| 207 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
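# A minimal usage sketch for the fast tokenizer above, using one of the
# pretrained entries listed in the maps:
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoding = tokenizer("Hello, world!")
#     print(encoding["input_ids"])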
| 207 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
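# A minimal usage sketch mirroring the integration test above (CPU, no gradients):
#
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     input_ids = torch.tensor([[1, 2]])
#     bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
#     with torch.no_grad():
#         outputs = model(input_ids=input_ids, bbox=bbox)
#     outputs.last_hidden_state.shape  # torch.Size([1, 2, 768])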
| 354 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
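# The lazy structure above keeps `import transformers` cheap: the heavy
# torch-backed module is only materialized on first attribute access. A sketch
# of consumer code, with deliberately tiny illustrative sizes:
#
#     from transformers import GPTBigCodeConfig, GPTBigCodeModel
#
#     config = GPTBigCodeConfig(n_layer=2, n_head=2, n_embd=64)
#     model = GPTBigCodeModel(config)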
| 26 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero shot image classification pipeline using CLIP-style checkpoints: it predicts the class of an image
    given the image and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
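# A minimal usage sketch of the pipeline above; the checkpoint and image path
# are illustrative:
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     predictions = classifier("photo.jpg", candidate_labels=["cat", "dog", "car"])
#     # [{'score': ..., 'label': ...}, ...] sorted by descending score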
| 91 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights to the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
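# For reference, a hypothetical direct call to the converter above
# (the dump folder is a placeholder):
#
#     convert_blip2_checkpoint("blip2-opt-2.7b", pytorch_dump_folder_path="./blip2-opt-2.7b")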
| 257 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 344 |
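A minimal usage sketch for the tokenizer above, assuming it is exposed as `transformers.ReformerTokenizer` and that the crime-and-punishment checkpoint is reachable (sentencepiece and network access required on first run):

# Hypothetical round-trip demo for the SentencePiece-backed tokenizer above.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("Crime and Punishment")["input_ids"]
print(ids)
print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))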
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 1 |
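The XOR step in try_key is symmetric, so encrypting with a key and decrypting with the same key round-trips. A small self-contained check of that property, independent of the Project Euler input file:

from itertools import cycle

plaintext = "the quick brown fox"
key = (ord("g"), ord("o"), ord("d"))
ciphertext = [ord(c) ^ k for c, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ k) for c, k in zip(ciphertext, cycle(key)))
assert decoded == plaintext
print(decoded)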
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
| 209 |
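Exponentiation by squaring above should agree with Python's built-in ** operator. A quick sanity sketch, assuming the two functions as fixed above are in scope:

# Assumes actual_power/power as defined above.
for base in (-2, 2, 3):
    for exp in (-3, 0, 1, 4):
        assert power(base, exp) == base ** exp, (base, exp)
print("power agrees with ** on the sampled inputs")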
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 209 | 1 |
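A brief usage note for the helper above (assuming the fixed name kinetic_energy): the sign of the velocity is irrelevant because it enters squared.

# Assumes kinetic_energy as defined above.
print(kinetic_energy(10, 10))   # 500.0
print(kinetic_energy(10, -10))  # 500.0, direction does not matter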
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w)
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h)
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_rescale'''))
        self.assertTrue(hasattr(image_processing, '''do_pad'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1_333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''') as f:
            target = json.loads(f.read())
        target = {'''image_id''': 39_769, '''annotations''': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='''pt''')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11_250.2061, 489_353.8438, 837_122.7500, 147_967.5156, 165_732.3438])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''') as f:
            target = json.loads(f.read())
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
        # encode them
        image_processing = DeformableDetrImageProcessor(format='''coco_panoptic''')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='''pt''')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['''pixel_values'''].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147_979.6875, 165_527.0469, 484_638.5938, 11_292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], expected_size))
| 371 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, '''question_encoder_tokenizer''')
        generator_path = os.path.join(save_directory, '''generator_tokenizer''')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('''config''', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='''question_encoder_tokenizer''')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='''generator_tokenizer''')
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)
    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs) -> BatchEncoding:
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''', FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
| 131 | 0 |
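A usage sketch for the wrapper above, assuming it is exposed as `transformers.RagTokenizer` and that the facebook/rag-token-base checkpoint can be downloaded:

# Hypothetical demo; fetches both sub-tokenizers on first run.
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
print(batch["input_ids"].shape)
tokenizer.save_pretrained("./rag_tok")  # writes question_encoder_tokenizer/ and generator_tokenizer/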
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 36 |
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = defaultdict(_lowerCamelCase )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : str = []
for duplicate_key in duplicates:
_lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
_lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : List[str] = content[api_idx]["sections"]
# Then to the model doc
_lowerCAmelCase : Union[str, Any] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"]
_lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : List[Any] = True
if overwrite:
_lowerCAmelCase : Dict = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Tuple = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : int = content[api_idx]["sections"]
# Then to the model doc
_lowerCAmelCase : List[str] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"]
_lowerCAmelCase : Tuple = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase : List[Any] = pipeline_doc["section"]
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if overwrite:
_lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase : Dict = True
if overwrite:
_lowerCAmelCase : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase : Optional[int] = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 36 | 1 |
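clean_doc_toc above is easy to exercise without touching the real _toctree.yml; a toy list with a duplicate entry and an overview entry shows the de-duplication, sorting, and overview-first ordering, assuming the function as fixed above is in scope:

# Assumes clean_doc_toc as defined above.
docs = [
    {"local": "api/ddim", "title": "DDIM"},
    {"local": "api/overview", "title": "Overview"},
    {"local": "api/ddim", "title": "DDIM"},  # duplicate local with the same title
    {"local": "api/adam", "title": "Adam"},
]
print(clean_doc_toc(docs))
# [{'local': 'api/overview', 'title': 'Overview'}, {'local': 'api/adam', 'title': 'Adam'}, {'local': 'api/ddim', 'title': 'DDIM'}]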
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""test""")
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""")
    parser.add_argument(
        """--config_file""", default=None, help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["""test_utils""", """scripts""", """test_script.py"""])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
    main()
 | 270 |
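The parser above can be exercised without launching anything; a minimal sketch assuming test_command_parser as fixed above is importable from this module:

# Assumes test_command_parser as defined above; nothing is executed here.
parser = test_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
print(args.config_file)  # my_config.yaml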
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name is an assumption; the source only shows a bare False flag here
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="""gelu-approximate""", num_embeds_ada_norm=1000, norm_type="""ada_norm_zero""", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """class_labels""": [1],
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""")
        pipe.to("""cuda""")
        words = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="""np""").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("""cuda""")
        words = ["""vase""", """umbrella"""]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="""np""").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                f"""/dit/{word}_512.npy""")
            assert np.abs((expected_image - image).max()) < 1e-1
| 270 | 1 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 265 |
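A quick usage demo for the n-gram helper above (assuming the fixed name create_ngram): the window slides one character at a time, so a string of length n yields n - size + 1 grams.

# Assumes create_ngram as defined above.
print(create_ngram("hello", 3))  # ['hel', 'ell', 'llo']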
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["""polics""", """health"""])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics""")
        self.assertEqual(outputs, {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""", ["""politics"""])
        self.assertEqual(outputs, {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics"""])
        self.assertEqual(outputs, {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics, public health""")
        self.assertEqual(
            outputs, {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])), 1.0)
        outputs = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health"""])
        self.assertEqual(
            outputs, {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])), 1.0)
        outputs = classifier(
            """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""This text is about {}""")
        self.assertEqual(outputs, {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""], ["""positive""", """negative"""])
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(["""I am happy""", """I am sad"""], ["""positive""", """negative"""])
        self.assertEqual(
            outputs, [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(2)
            ], )
        with self.assertRaises(ValueError):
            classifier("""""", candidate_labels="""politics""")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="""politics""")
        with self.assertRaises(ValueError):
            classifier("""Who are you voting for in 2020?""", candidate_labels="""""")
        with self.assertRaises(TypeError):
            classifier("""Who are you voting for in 2020?""", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""Not formatting template""", )
        with self.assertRaises(AttributeError):
            classifier(
                """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template=None, )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""")
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""])
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""")
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""])
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 265 | 1 |
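Outside the test harness, the pipeline under test is driven like this; a minimal sketch assuming network access for the model download:

# Hypothetical demo of the zero-shot-classification pipeline exercised above.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier("Who are you voting for in 2020?",
                    candidate_labels=["politics", "public health", "science"])
print(result["labels"][0], result["scores"][0])  # most likely label first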
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
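A short usage sketch for the prefix-sum class above (assuming the fixed name PrefixSum): get_sum is O(1) per query after the O(n) build, and contains_sum looks for any contiguous slice with the target total.

# Assumes PrefixSum as defined above.
ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(1, 3))      # 9  (2 + 3 + 4)
print(ps.contains_sum(7))    # True (3 + 4)
print(ps.contains_sum(100))  # False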
| 35 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
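A quick demo of the evaluator above (assuming the fixed name evaluate_postfix); note that division is floor division adjusted toward zero for mixed signs:

# Assumes evaluate_postfix as defined above.
print(evaluate_postfix(["2", "1", "+", "3", "*"]))   # (2 + 1) * 3 = 9
print(evaluate_postfix(["4", "13", "5", "/", "+"]))  # 4 + 13 // 5 = 6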
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
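

# A minimal usage sketch (it assumes IIRFilter exposes a per-sample `process`
# method, as in the accompanying audio_filters.iir_filter module):
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    print([lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)])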
| 4 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
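# Example invocation (script and checkpoint paths are illustrative, not from the original file):
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth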
| 278 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = '    def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
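# For context (a sketch, not part of the original test file): the checker acts on
# markers of this shape in modeling files, and `is_copy_consistent(path)` returns
# the inconsistent blocks while `is_copy_consistent(path, overwrite=True)` rewrites
# them in place, exactly as exercised in the tests above:
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...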
| 205 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97),
    using modular exponentiation so the huge power is never materialised."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 205 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class ModelParallelTrainingTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_model_parallelism(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
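# These tests talk to a real SageMaker account; per the skipif guard above, a
# typical invocation (the test path is assumed, not stated in this file) would be:
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker -k model_parallel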
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
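    # With the lazy module installed in sys.modules, the heavy torch/TF/Flax
    # submodules listed above are only imported on first attribute access, e.g.
    # `from transformers.models.resnet import ResNetConfig`.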
| 270 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes, yielding primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """For the n-th prime p and odd n, ((p - 1)**n + (p + 1)**n) % p**2 equals
    2 * n * p, so we scan odd n until that remainder first exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 368 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # rightmost index of `char` in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatching position in the current text window, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__snake_case = """ABAABA"""
__snake_case = """AB"""
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 169 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
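# Note: the @nightly and @require_torch_gpu markers gate these tests; they are
# typically enabled on a CUDA machine with the nightly flag set, e.g.
# `RUN_NIGHTLY=1 pytest <this file>` (an assumption about the runner setup,
# not stated in this file).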
| 101 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 101 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
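
# A minimal usage sketch (`segmentation_model` and `pil_image` are hypothetical
# stand-ins, not defined in this file):
#   processor = MobileNetV2ImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   outputs = segmentation_model(**batch)
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[pil_image.size[::-1]])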
| 358 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)
    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)
            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
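# With trust_remote_code enabled, the custom tokenizer class is downloaded from the repo and executed locally.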
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = False
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use local
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> str:
UpperCamelCase__ : Union[str, Any] = FileLock(str(tmpdir / '''foo.lock''' ) )
UpperCamelCase__ : Union[str, Any] = FileLock(str(tmpdir / '''foo.lock''' ) )
UpperCamelCase__ : str = 0.01
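# While the first lock is held, acquiring the second FileLock on the same path must block for ~timeout seconds and then raise Timeout.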
with locka.acquire():
with pytest.raises(__UpperCAmelCase ):
UpperCamelCase__ : List[Any] = time.time()
locka.acquire(__UpperCAmelCase )
assert time.time() - _start > timeout
def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] ) -> Any:
UpperCamelCase__ : Optional[Any] = '''a''' * 1000 + '''.lock'''
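# Most filesystems cap file names at 255 bytes, so FileLock must truncate an over-long lock-file name while keeping the .lock suffix.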
UpperCamelCase__ : List[str] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(__UpperCAmelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCamelCase__ : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__UpperCAmelCase ):
locka.acquire(0 )
| 201 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : str = ["pixel_values"]
def __init__( self, __magic_name__ = True, __magic_name__ = 32, __magic_name__=PILImageResampling.BILINEAR, __magic_name__ = True, **__magic_name__, ) -> None:
"""simple docstring"""
UpperCamelCase__ : int = do_resize
UpperCamelCase__ : Tuple = do_rescale
UpperCamelCase__ : Any = size_divisor
UpperCamelCase__ : List[Any] = resample
super().__init__(**__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__ ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase__ : Any = height // size_divisor * size_divisor
UpperCamelCase__ : Optional[int] = width // size_divisor * size_divisor
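# e.g. with height=230 and size_divisor=32 (illustrative values): 230 // 32 * 32 = 224.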
UpperCamelCase__ : str = resize(__magic_name__, (new_h, new_w), resample=__magic_name__, data_format=__magic_name__, **__magic_name__ )
return image
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__ ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__, scale=__magic_name__, data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, __magic_name__=None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = ChannelDimension.FIRST, **__magic_name__, ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase__ : str = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCamelCase__ : Union[str, Any] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : Optional[Any] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCamelCase__ : str = [self.resize(__magic_name__, size_divisor=__magic_name__, resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCamelCase__ : List[Any] = [self.rescale(__magic_name__, scale=1 / 255 ) for image in images]
UpperCamelCase__ : Optional[Any] = [to_channel_dimension_format(__magic_name__, __magic_name__ ) for image in images]
UpperCamelCase__ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__, tensor_type=__magic_name__ )
| 201 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Optional[int] = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
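# When torch is available the model classes are added to the import structure; _LazyModule below defers the real imports until first attribute access.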
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
UpperCAmelCase_ : Dict = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
UpperCAmelCase_ : Any = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
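# Depth (number of hidden layers) and hidden size for each published RWKV checkpoint size; the keys match the size tag embedded in checkpoint file names.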
def UpperCamelCase ( _A : Dict )-> Optional[int]:
"""simple docstring"""
A__ = list(state_dict.keys() )
for name in state_dict_keys:
A__ = state_dict.pop(_A )
# emb -> embeddings
if name.startswith("emb." ):
A__ = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
A__ = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
A__ = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , _A )
# ffn -> feed_forward
A__ = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , _A )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
A__ = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
A__ = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
A__ = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
A__ = "rwkv." + name
A__ = weight
return state_dict
def UpperCamelCase ( _A : str , _A : List[Any] , _A : List[Any] , _A : int=None , _A : List[str]=None , _A : Dict=False , _A : List[Any]=None )-> str:
"""simple docstring"""
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
A__ = 50277
A__ = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
A__ = PreTrainedTokenizerFast(tokenizer_file=_A )
A__ = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
A__ = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A__ = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
A__ = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
A__ = hf_hub_download(_A , _A )
A__ = torch.load(_A , map_location="cpu" )
A__ = convert_state_dict(_A )
# 4. Split in shards and save
A__ , A__ = shard_checkpoint(_A )
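# shard_checkpoint splits the state dict into pieces (10GB each by default) and returns an index mapping tensor names to shard files; the index is None when a single shard suffices.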
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
A__ = os.path.join(_A , _A )
# Save the index as well
with open(_A , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
A__ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A__ = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
A__ = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size="2GB" )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 198 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[int]=1_0_2_4 ) ->Dict:
lowerCamelCase__ , lowerCamelCase__ : str =[], []
lowerCamelCase__ : Any =list(zip(snake_case_ , snake_case_ ) )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =sorted_examples[0]
def is_too_big(snake_case_ : str ):
return tok(snake_case_ , return_tensors='pt' ).input_ids.shape[1] > max_tokens
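# Greedy packing: keep appending the next (src, tgt) pair to the running example until either side would exceed max_tokens, then flush it and start a new one.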
for src, tgt in tqdm(sorted_examples[1:] ):
lowerCamelCase__ : List[Any] =new_src + ' ' + src
lowerCamelCase__ : Dict =new_tgt + ' ' + tgt
if is_too_big(snake_case_ ) or is_too_big(snake_case_ ): # can't fit, finalize example
finished_src.append(snake_case_ )
finished_tgt.append(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] =src, tgt
else: # can fit, keep adding
lowerCamelCase__ , lowerCamelCase__ : Dict =cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(snake_case_ )
finished_tgt.append(snake_case_ )
return finished_src, finished_tgt
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Path , snake_case_ : Optional[Any] , snake_case_ : Tuple ) ->Any:
lowerCamelCase__ : Tuple =Path(snake_case_ )
save_path.mkdir(exist_ok=snake_case_ )
for split in ["train"]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] =data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
lowerCamelCase__ : int =[x.rstrip() for x in Path(snake_case_ ).open().readlines()]
lowerCamelCase__ : Tuple =[x.rstrip() for x in Path(snake_case_ ).open().readlines()]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =pack_examples(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
print(f"""packed {split} split from {len(snake_case_ )} examples -> {len(snake_case_ )}.""" )
Path(save_path / f"""{split}.source""" ).open('w' ).write('\n'.join(snake_case_ ) )
Path(save_path / f"""{split}.target""" ).open('w' ).write('\n'.join(snake_case_ ) )
for split in ["val", "test"]:
lowerCamelCase__ , lowerCamelCase__ : List[str] =data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(snake_case_ , save_path / f"""{split}.source""" )
shutil.copyfile(snake_case_ , save_path / f"""{split}.target""" )
def lowerCAmelCase_ ( ) ->Any:
lowerCamelCase__ : int =argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=snake_case_ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=snake_case_ , default=1_2_8 )
parser.add_argument('--data_dir' , type=snake_case_ )
parser.add_argument('--save_path' , type=snake_case_ )
lowerCamelCase__ : Tuple =parser.parse_args()
lowerCamelCase__ : List[Any] =AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(snake_case_ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 126 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :int=13 , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :int=[10, 20, 30, 40] , lowerCamelCase_ :Dict=[2, 2, 3, 2] , lowerCamelCase_ :Any=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Any=37 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=10 , lowerCamelCase_ :int=0.02 , lowerCamelCase_ :Optional[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ :List[str]=[2, 3, 4] , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =parent
lowerCamelCase__ : Optional[int] =batch_size
lowerCamelCase__ : List[Any] =image_size
lowerCamelCase__ : Dict =num_channels
lowerCamelCase__ : Optional[int] =num_stages
lowerCamelCase__ : Optional[int] =hidden_sizes
lowerCamelCase__ : Optional[Any] =depths
lowerCamelCase__ : Any =is_training
lowerCamelCase__ : Optional[Any] =use_labels
lowerCamelCase__ : List[str] =intermediate_size
lowerCamelCase__ : int =hidden_act
lowerCamelCase__ : Dict =num_labels
lowerCamelCase__ : Union[str, Any] =initializer_range
lowerCamelCase__ : Dict =out_features
lowerCamelCase__ : List[str] =out_indices
lowerCamelCase__ : Any =scope
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Any =None
if self.use_labels:
lowerCamelCase__ : str =ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Tuple =self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =ConvNextVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =ConvNextVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any =model(lowerCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__ : Union[str, Any] =None
lowerCamelCase__ : Dict =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] =model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Dict ={'pixel_values': pixel_values}
return config, inputs_dict
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Optional[int] ={'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : int =ConvNextVaModelTester(self )
lowerCamelCase__ : Tuple =ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : List[str] =True
if model_class.__name__ in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]:
continue
lowerCamelCase__ : Any =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : Union[str, Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : Optional[Any] =False
lowerCamelCase__ : Optional[Any] =True
if (
model_class.__name__
in [*get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase__ : List[str] =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCamelCase__ : List[Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase_ )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] =['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] ):
lowerCamelCase__ : Tuple =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] =model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCamelCase__ : int =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Union[str, Any] =self.model_tester.num_stages
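# +1 because hidden_states also includes the initial embedding output, in addition to one feature map per stage.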
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] =ConvNextVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCAmelCase_ ( ) ->List[str]:
lowerCamelCase__ : Tuple =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : int =ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] =self.default_image_processor
lowerCamelCase__ : int =prepare_img()
lowerCamelCase__ : List[Any] =preprocessor(images=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] =model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Union[str, Any] =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCamelCase__ : Dict =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
| 126 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
A__ = TOKENIZER_CLASSES
else:
A__ = {tokenizer_name: getattr(lowercase_ , tokenizer_name + "Fast" )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
A__ = TOKENIZER_CLASSES[tokenizer_name]
A__ = True
if checkpoint_name is None:
A__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
A__ = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
A__ = tokenizer_class.from_pretrained(lowercase_ , force_download=lowercase_ )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
A__, A__ = checkpoint.split("/" )
A__ = os.path.join(lowercase_ , lowercase_ )
elif add_prefix:
A__ = checkpoint
A__ = dump_path
else:
A__ = None
A__ = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
A__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
A__ = file_path.split(lowercase_ )[-1][0]
if next_char == "/":
A__ = os.path.join(lowercase_ , lowercase_ )
A__ = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
A__ = tokenizer.save_pretrained(
lowercase_ , legacy_format=lowercase_ , filename_prefix=lowercase_ )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(lowercase_ )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 230 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : str ) -> Dict:
'''simple docstring'''
A__ = "ZinengTang/tvlt-base"
A__ = tempfile.mkdtemp()
def __magic_name__ ( self : int , **snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] , **snake_case_ : str ) -> List[str]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __magic_name__ ( self : Optional[int] ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
A__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = feature_extractor(snake_case_ , return_tensors="np" )
A__ = processor(audio=snake_case_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([3, 224, 224] )
A__ = image_processor(snake_case_ , return_tensors="np" )
A__ = processor(images=snake_case_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
A__ = np.ones([12_000] )
A__ = np.ones([3, 224, 224] )
A__ = processor(audio=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_feature_extractor()
A__ = TvltProcessor(image_processor=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 230 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
return (preds == labels).mean()
@dataclass
class snake_case :
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=__snake_case, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=__snake_case, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=__snake_case, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
@dataclass
class snake_case :
SCREAMING_SNAKE_CASE_ : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
SCREAMING_SNAKE_CASE_ : int = field(
default=1_28, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
SCREAMING_SNAKE_CASE_ : bool = field(
default=__snake_case, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def a__ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
try:
__lowerCAmelCase: Optional[int] = processors[data_args.task_name]()
__lowerCAmelCase: Optional[int] = processor.get_labels()
__lowerCAmelCase: Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__lowerCAmelCase: Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCAmelCase: str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
__lowerCAmelCase: Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__lowerCAmelCase: List[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
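# p.predictions has shape (num_examples, num_choices); the argmax over choices is compared against the gold labels.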
def compute_metrics(__SCREAMING_SNAKE_CASE ) -> Dict:
__lowerCAmelCase: List[str] = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__SCREAMING_SNAKE_CASE , p.label_ids )}
# Data collator
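# pad_to_multiple_of=8 aligns sequence lengths with fp16 tensor-core tile sizes; in fp32 mode no collator is passed and the Trainer uses its default.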
__lowerCAmelCase: int = DataCollatorWithPadding(__SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__lowerCAmelCase: Tuple = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , compute_metrics=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCAmelCase: str = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCAmelCase: Union[str, Any] = trainer.evaluate()
__lowerCAmelCase: Any = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_master():
with open(__SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
writer.write("%s = %s\n" % (key, value) )
results.update(__SCREAMING_SNAKE_CASE )
return results
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 217 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Tuple = """beit"""
def __init__( self : List[Any] , UpperCamelCase__ : List[str]=8_1_9_2 , UpperCamelCase__ : Dict=7_6_8 , UpperCamelCase__ : List[str]=1_2 , UpperCamelCase__ : Union[str, Any]=1_2 , UpperCamelCase__ : Dict=3_0_7_2 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Optional[Any]=1e-12 , UpperCamelCase__ : str=2_2_4 , UpperCamelCase__ : str=1_6 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[Any]=[3, 5, 7, 1_1] , UpperCamelCase__ : Optional[Any]=[1, 2, 3, 6] , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Tuple=0.4 , UpperCamelCase__ : Optional[Any]=2_5_6 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[Any]=2_5_5 , **UpperCamelCase__ : Optional[int] , )-> int:
'''simple docstring'''
super().__init__(**UpperCamelCase__)
__lowerCAmelCase: str = vocab_size
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: str = num_hidden_layers
__lowerCAmelCase: Tuple = num_attention_heads
__lowerCAmelCase: Union[str, Any] = intermediate_size
__lowerCAmelCase: List[Any] = hidden_act
__lowerCAmelCase: Optional[Any] = hidden_dropout_prob
__lowerCAmelCase: List[Any] = attention_probs_dropout_prob
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: Optional[Any] = layer_norm_eps
__lowerCAmelCase: Any = image_size
__lowerCAmelCase: Any = patch_size
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: Tuple = use_mask_token
__lowerCAmelCase: Optional[Any] = use_absolute_position_embeddings
__lowerCAmelCase: List[Any] = use_relative_position_bias
__lowerCAmelCase: Optional[Any] = use_shared_relative_position_bias
__lowerCAmelCase: List[str] = layer_scale_init_value
__lowerCAmelCase: str = drop_path_rate
__lowerCAmelCase: str = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase: Optional[Any] = out_indices
__lowerCAmelCase: Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase: List[str] = use_auxiliary_head
__lowerCAmelCase: Union[str, Any] = auxiliary_loss_weight
__lowerCAmelCase: Optional[int] = auxiliary_channels
__lowerCAmelCase: Dict = auxiliary_num_convs
__lowerCAmelCase: List[Any] = auxiliary_concat_input
__lowerCAmelCase: str = semantic_loss_ignore_index
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = version.parse("""1.11""" )
@property
def lowercase_ ( self : str)-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self : Any)-> float:
'''simple docstring'''
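# Presumably the absolute tolerance used when validating the ONNX export against the PyTorch reference outputs.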
return 1e-4
| 217 | 1 |
from __future__ import annotations
class a :
def __init__( self , A_ = 0 ):
'''simple docstring'''
_UpperCAmelCase : Tuple = key
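# A key of 0 is treated as "no key set": the methods below fall back to self.__key or, failing that, 1.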
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
_UpperCAmelCase : str = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(A_ ) ^ key ) for ch in content]
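# XOR is self-inverse ((ch ^ key) ^ key == ch), so decrypt below is byte-for-byte identical to encrypt.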
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
_UpperCAmelCase : List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(A_ ) ^ key ) for ch in content]
def _UpperCAmelCase ( self , A_ , A_ = 0 ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
_UpperCAmelCase : Dict = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_UpperCAmelCase : List[Any] = ""
for ch in content:
ans += chr(ord(A_ ) ^ key )
return ans
def _UpperCAmelCase ( self , A_ , A_ = 0 ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
_UpperCAmelCase : str = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_UpperCAmelCase : int = ""
for ch in content:
ans += chr(ord(A_ ) ^ key )
return ans
def _UpperCAmelCase ( self , A_ , A_ = 0 ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
try:
with open(A_ ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(A_ , A_ ) )
except OSError:
return False
return True
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
assert isinstance(A_ , A_ ) and isinstance(A_ , A_ )
try:
with open(A_ ) as fin, open("decrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(A_ , A_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 357 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any , lowerCAmelCase: List[str] , lowerCAmelCase: int=8 ) -> int:
_UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_UpperCAmelCase : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
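# e.g. height=512 with scale_factor=8 (illustrative values): (512 // 8**2) * 8 = 64, the latent height; non-multiples of scale_factor**2 round up.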
class a ( UpperCAmelCase ):
def __init__( self , A_ , A_ , A_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
_UpperCAmelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if latents is None:
_UpperCAmelCase : Any = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase : Optional[int] = latents.to(A_ )
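# Scale by the scheduler's initial noise sigma so the first denoising step sees correctly scaled noise.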
_UpperCAmelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase : str = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ , A_ , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Tuple = guidance_scale > 1.0
if isinstance(A_ , A_ ):
_UpperCAmelCase : Union[str, Any] = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Dict = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Any = torch.cat(A_ , dim=0 )
_UpperCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Tuple = hint.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
_UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
_UpperCAmelCase : Dict = self.scheduler.timesteps
_UpperCAmelCase : Union[str, Any] = self.movq.config.latent_channels
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = {"image_embeds": image_embeds, "hint": hint}
_UpperCAmelCase : Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
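# Self-contained sketch (illustrative only, not part of the original pipeline)
# of the classifier-free-guidance blend applied in the denoising loop above:
# the batched prediction splits into an unconditional and a text-conditioned
# half, and the result is pushed toward the text branch by guidance_scale.
if __name__ == "__main__":
    import torch

    noise_uncond = torch.zeros(1, 4, 64, 64)  # stand-in unconditional branch
    noise_text = torch.ones(1, 4, 64, 64)  # stand-in text-conditioned branch
    guidance_scale = 4.0
    guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
    assert torch.allclose(guided, torch.full((1, 4, 64, 64), 4.0))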
| 189 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase__ = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
_lowercase ={}
state_dict.pop('''pixel_mean''' , __snake_case )
state_dict.pop('''pixel_std''' , __snake_case )
_lowercase =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowercase =key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
_lowercase =int(re.match(__snake_case , __snake_case ).group(2 ) )
if layer_nb == 0:
_lowercase =key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
_lowercase =key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
_lowercase =key.replace('''layers.2''' , '''proj_out''' )
_lowercase =value
_lowercase =model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
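# Toy demonstration (hypothetical key, not from a real checkpoint) of the
# layer-renumbering regex used above: within each output-hypernetwork MLP,
# layer 0 maps to proj_in, layer 1 to layers.0, and layer 2 to proj_out.
if __name__ == "__main__":
    toy_key = "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"
    toy_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    assert int(re.match(toy_pattern, toy_key).group(2)) == 2  # -> renamed to proj_out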
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case="ybelkada/segment-anything" ) -> Tuple:
"""simple docstring"""
_lowercase =hf_hub_download(__snake_case , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
_lowercase =SamConfig()
elif "sam_vit_l" in model_name:
_lowercase =SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_lowercase =SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
_lowercase =SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_lowercase =SamConfig(
vision_config=__snake_case , )
_lowercase =torch.load(__snake_case , map_location='''cpu''' )
_lowercase =replace_keys(__snake_case )
_lowercase =SamImageProcessor()
_lowercase =SamProcessor(image_processor=__snake_case )
_lowercase =SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
_lowercase =hf_model.to('''cuda''' )
_lowercase ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_lowercase =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
_lowercase =[[[400, 650]]]
_lowercase =[[1]]
_lowercase =processor(images=np.array(__snake_case ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowercase =hf_model(**__snake_case )
_lowercase =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
_lowercase =processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowercase =hf_model(**__snake_case )
_lowercase =output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604
_lowercase =((75, 275, 1725, 850),)
_lowercase =processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowercase =hf_model(**__snake_case )
_lowercase =output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
_lowercase =[[[400, 650], [800, 650]]]
_lowercase =[[1, 1]]
_lowercase =processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowercase =hf_model(**__snake_case )
_lowercase =output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
UpperCAmelCase__ = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
        help='''Name of the SAM checkpoint to convert.''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    parser.add_argument(
        '''--model_hub_id''',
        default='''ybelkada/segment-anything''',
        type=str,
        help='''Hub repo id that hosts the original SAM checkpoints.''',
    )
UpperCAmelCase__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
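# Worked example (not part of the class): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the property above multiplies out to 5 * 2**6 == 320,
# i.e. the feature extractor emits one frame per 320 input samples.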
| 5 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 1
@register_to_config
def __init__( self : Any , lowercase_ : int = 2000 , lowercase_ : float = 0.15 , lowercase_ : float = 0.01 , lowercase_ : float = 13_48.0 , lowercase_ : float = 1e-5 , lowercase_ : int = 1 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = sigma_max
# setable values
SCREAMING_SNAKE_CASE_ : List[str] = None
self.set_sigmas(lowercase_ , lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None):
'''simple docstring'''
return sample
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : float = None , lowercase_ : Union[str, torch.device] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE_ : Dict = torch.linspace(1 , lowercase_ , lowercase_ , device=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int , lowercase_ : float = None , lowercase_ : float = None , lowercase_ : float = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.exp(torch.linspace(math.log(lowercase_) , math.log(lowercase_) , lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any]):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : torch.FloatTensor , lowercase_ : int , lowercase_ : torch.FloatTensor , lowercase_ : Optional[torch.Generator] = None , lowercase_ : bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
SCREAMING_SNAKE_CASE_ : Dict = timestep * torch.ones(
sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE_ : Any = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE_ : List[Any] = timesteps.to(self.discrete_sigmas.device)
SCREAMING_SNAKE_CASE_ : int = self.discrete_sigmas[timesteps].to(sample.device)
SCREAMING_SNAKE_CASE_ : Dict = self.get_adjacent_sigma(lowercase_ , lowercase_).to(sample.device)
SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros_like(lowercase_)
SCREAMING_SNAKE_CASE_ : int = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE_ : List[Any] = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
SCREAMING_SNAKE_CASE_ : int = diffusion.unsqueeze(-1)
SCREAMING_SNAKE_CASE_ : List[str] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor(
sample.shape , layout=sample.layout , generator=lowercase_ , device=sample.device , dtype=sample.dtype)
SCREAMING_SNAKE_CASE_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE_ : Optional[int] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase_ , prev_sample_mean=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : Optional[torch.Generator] = None , lowercase_ : bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE_ : int = randn_tensor(sample.shape , layout=sample.layout , generator=lowercase_).to(sample.device)
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
SCREAMING_SNAKE_CASE_ : List[str] = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
SCREAMING_SNAKE_CASE_ : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE_ : Dict = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
SCREAMING_SNAKE_CASE_ : Dict = step_size.unsqueeze(-1)
SCREAMING_SNAKE_CASE_ : str = sample + step_size * model_output
SCREAMING_SNAKE_CASE_ : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = timesteps.to(original_samples.device)
SCREAMING_SNAKE_CASE_ : Any = self.discrete_sigmas.to(original_samples.device)[timesteps]
SCREAMING_SNAKE_CASE_ : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase_) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE_ : List[Any] = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any]):
'''simple docstring'''
return self.config.num_train_timesteps
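# Standalone sketch (toy values, not part of the scheduler) of the log-uniform
# sigma grid built in set_sigmas above: the discrete sigmas run from sigma_min
# to sigma_max with a constant ratio between neighbours.
if __name__ == "__main__":
    sigma_min, sigma_max, num_steps = 0.01, 1348.0, 5
    grid = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))
    print(grid)  # approximately [0.01, 0.19, 3.67, 70.3, 1348.0]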
| 318 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the empty cells by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
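# Quick illustrative checks (not in the original file): row 0 of the solvable
# grid already holds a 4, so 4 is rejected at (0, 1), while a 1 conflicts with
# nothing in that row, column, or 3x3 box.
assert not is_safe(initial_grid, 0, 1, 4)
assert is_safe(initial_grid, 0, 1, 1)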
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 318 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase__ = logging.get_logger(__name__)
lowercase__ = TypeVar("""DatasetType""", Dataset, IterableDataset)
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "first_exhausted" , ) -> Optional[int]:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"is an empty dataset dictionary." )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__UpperCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}.''' )
if i == 0:
lowerCAmelCase_ : Optional[int] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , ) -> str:
"""simple docstring"""
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"is an empty dataset dictionary." )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__UpperCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}.''' )
if i == 0:
lowerCAmelCase_ : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase )
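# Hedged usage sketch (assumes the public ``datasets`` API re-exports the
# helper above as ``interleave_datasets``; the toy datasets are made up):
#
#   from datasets import Dataset, interleave_datasets
#
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12, 13]})
#   mixed = interleave_datasets([d1, d2])
#   # default "first_exhausted" strategy -> mixed["x"] == [0, 10, 1, 11, 2, 12]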
| 241 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a constant schedule preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a piecewise constant learning-rate schedule from a comma-separated rule string."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule that decays linearly to 0 after a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Create a schedule that follows a cosine decay after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
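# Note on the cosine factor above: with the default num_cycles = 0.5 it equals
# 0.5 * (1 + cos(pi * progress)), decaying smoothly from 1.0 at the end of
# warmup to 0.0 at num_training_steps (half a cosine period).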
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Create a cosine schedule with ``num_cycles`` hard restarts after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Create a schedule with a polynomial decay from the initial lr to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch)
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch)
    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
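# Usage sketch (illustrative, not part of the original module): build a linear
# warmup-then-decay schedule for a hypothetical one-parameter optimizer.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.AdamW(params, lr=1e-4)
    lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        lr_scheduler.step()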
| 251 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 360 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = (IPNDMScheduler,)
UpperCAmelCase : Optional[Any] = (('''num_inference_steps''', 50),)
def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : List[Any] ):
        config = {'num_train_timesteps': 1_000}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config(**_UpperCAmelCase )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[:]
if time_step is None:
_A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[:]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Any=0 , **_UpperCAmelCase : Any ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_A = dummy_past_residuals[:]
if time_step is None:
_A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_A = dummy_past_residuals[:]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : List[str] , **_UpperCAmelCase : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**_UpperCAmelCase )
_A = scheduler_class(**_UpperCAmelCase )
_A = 10
_A = self.dummy_model()
_A = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , 'set_timesteps' ):
_A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_A = dummy_past_residuals[:]
_A = scheduler.timesteps[5]
_A = scheduler.timesteps[6]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ ( self : Tuple ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase , time_step=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase , time_step=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.full_loop()
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 271 | 0 |
def mean_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of the numbers in ``nums``.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can be filled with blocks of
    length at least 3 separated by at least one empty unit (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
snake_case_ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' )
return image
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = dct.pop(UpperCamelCase__ )
snake_case_ = val
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
snake_case_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
snake_case_ = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
snake_case_ = qkv_bias
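# Note on the concatenation above (explanatory, not in the original file): the
# ViT attention layers use a fused qkv projection whose key branch carries no
# bias, so the checkpoint's q and v biases are stitched around a zero block:
# qkv_bias = [q_bias, zeros_like(q_bias), v_bias].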
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = 364 if 'coco' in model_name else 224
snake_case_ = BlipaVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
snake_case_ = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=UpperCamelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
snake_case_ = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=UpperCamelCase__ ).to_dict()
elif "t5-xl" in model_name:
snake_case_ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case_ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
snake_case_ = BlipaConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
'''simple docstring'''
snake_case_ = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
snake_case_ = tokenizer('\n' , add_special_tokens=UpperCamelCase__ ).input_ids[0]
snake_case_ , snake_case_ = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
snake_case_ = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
snake_case_ = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
snake_case_ , snake_case_ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
snake_case_ = 'cuda' if torch.cuda.is_available() else 'cpu'
snake_case_ , snake_case_ , snake_case_ = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print('Done!' )
# update state dict keys
snake_case_ = original_model.state_dict()
snake_case_ = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ = state_dict.pop(UpperCamelCase__ )
if key.startswith('Qformer.bert' ):
snake_case_ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
snake_case_ = key.replace('self' , 'attention' )
if "opt_proj" in key:
snake_case_ = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
snake_case_ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
snake_case_ = key.replace('opt' , 'language' )
if key.startswith('t5' ):
snake_case_ = key.replace('t5' , 'language' )
snake_case_ = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
snake_case_ , snake_case_ = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case_ = load_demo_image()
snake_case_ = vis_processors['eval'](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
snake_case_ = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(UpperCamelCase__ )
# create processor
snake_case_ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
snake_case_ = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
snake_case_ = processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values.to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "opt" in model_name:
snake_case_ = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
snake_case_ = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
else:
snake_case_ = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
snake_case_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
snake_case_ = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case_ = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=UpperCamelCase__ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case_ = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=UpperCamelCase__ )
else:
# cast to same type
snake_case_ = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
snake_case_ = ''
snake_case_ = tokenizer(UpperCamelCase__ , return_tensors='pt' ).input_ids.to(UpperCamelCase__ )
snake_case_ = original_model.generate({'image': original_pixel_values} )
snake_case_ = hf_model.generate(
UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , UpperCamelCase__ )
snake_case_ = input_ids.shape[1]
snake_case_ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
snake_case_ = [text.strip() for text in output_text]
print('HF generation:' , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
_UpperCAmelCase : str = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 358 |
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place using odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
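# Note (not in the original file): odd-even transposition sort is a bubble-sort
# variant; its alternating odd/even comparison passes are mutually independent,
# which makes it a classic building block for parallel sorting networks, though
# a single-threaded run remains O(n**2).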
| 200 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE :Dict = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE :Optional[Any] = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,):
__A = {} if sp_model_kwargs is None else sp_model_kwargs
__A = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
__A = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A = "<|endoftext|>" if eos_token is None else eos_token
__A = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A = unk_token if pad_token is None else pad_token
__A = eos_token if bos_token is None else bos_token
else:
__A = "<pad>" if pad_token is None else pad_token
__A = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = do_lower_case
__A = remove_space
__A = keep_accents
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
        # fmt: off
__A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __A = re.compile(
            f"[{''.join(map(chr ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]" )
def __getstate__( self : Optional[int] ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Optional[Any] ,A : Union[str, Any] ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase_ ( self : List[str] ):
return len(self.sp_model )
def UpperCamelCase_ ( self : int ,A : str ):
__A = self.non_printing_characters_re.sub("" ,A )
# Normalize whitespaces
__A = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__A = unicodedata.normalize("NFC" ,A )
return text
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ):
__A = self.preprocess_text(A )
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : int ):
return self.sp_model.IdToPiece(A )
@staticmethod
    def UpperCamelCase_ ( out_string : str ):
return out_string
def UpperCamelCase_ ( self : str ,A : List[str] ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string
def UpperCamelCase_ ( self : str ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ):
if isinstance(A ,A ):
__A = self.preprocess_text(A )
__A = self.sp_model.encode(A )
else:
__A = [self.preprocess_text(A ) for t in text]
__A = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
__A = torch.tensor(A )
return token_ids
def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ):
return self.sp_model.decode(A )
def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ):
__A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__A = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=A )
| 15 |
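The tokenizer's preprocess_text above strips non-printing characters, collapses exotic Unicode spaces, and applies NFC normalization. A minimal standalone sketch of the first and last steps (the whitespace-collapsing set is omitted for brevity):

import re
import unicodedata

# Non-printing characters targeted above: C0 controls except \t and \n,
# DEL plus C1 controls, NBSP, soft hyphen, and zero-width space.
NON_PRINTING = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)

def preprocess(text: str) -> str:
    text = NON_PRINTING.sub("", text)           # drop control characters
    return unicodedata.normalize("NFC", text)   # canonical composition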
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata ( class_info_file : int , repo_path : Any="shi-labs/oneformer_demo" ) -> Dict:
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        UpperCAmelCase_ : Union[str, Any] = json.load(f )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
            thing_ids.append(int(key ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ : int = max(expected_values , key=lambda item : item[0] )[0]
            UpperCAmelCase_ : List[str] = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps : Any=False , is_instance_map : List[Any]=False , segmentation_type : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
        def common(is_instance_map : Optional[int]=False , segmentation_type : str=None ):
            UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='''pil''' )
        common(is_instance_map=True , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : int = np.zeros((20, 50) )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 304 | 0 |
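binary_mask_to_rle, exercised in the test above, encodes a flattened binary mask as alternating (start, length) pairs with 1-indexed starts. One common implementation sketch (an assumption; not necessarily the exact function imported above):

import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    # Pad with zeros so every run of ones has a well-defined boundary.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    # Indices where the value changes mark run starts and ends (1-indexed).
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Convert every second entry from an end index into a run length.
    runs[1::2] -= runs[::2]
    return list(runs)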
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class a__ ( unittest.TestCase ):
_lowerCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowercase ( self : int, lowerCAmelCase : Any, lowerCAmelCase : Optional[int], lowerCAmelCase : Union[str, Any] ) -> str:
lowercase : List[str] = TextaTextGenerationPipeline(model=_A, tokenizer=_A )
return generator, ["Something to write", "Something else"]
def lowercase ( self : List[Any], lowerCAmelCase : Tuple, lowerCAmelCase : Any ) -> Optional[int]:
lowercase : Dict = generator('Something there' )
self.assertEqual(_A, [{'generated_text': ANY(_A )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
lowercase : Dict = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=_A )
self.assertEqual(
_A, [
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
], )
lowercase : List[Any] = generator(
['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=_A )
self.assertEqual(
_A, [
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
], )
with self.assertRaises(_A ):
generator(4 )
@require_torch
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowercase : Union[str, Any] = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt' )
# do_sample=False necessary for reproducibility
lowercase : str = generator('Something there', do_sample=_A )
self.assertEqual(_A, [{'generated_text': ''}] )
lowercase : Tuple = 3
lowercase : int = generator(
'Something there', num_return_sequences=_A, num_beams=_A, )
lowercase : Optional[Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(_A, _A )
lowercase : Dict = generator('This is a test', do_sample=_A, num_return_sequences=2, return_tensors=_A )
self.assertEqual(
_A, [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
], )
lowercase : List[Any] = generator.model.config.eos_token_id
lowercase : Optional[Any] = '<pad>'
lowercase : Dict = generator(
['This is a test', 'This is a second test'], do_sample=_A, num_return_sequences=2, batch_size=2, return_tensors=_A, )
self.assertEqual(
_A, [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
], )
@require_tf
def lowercase ( self : Union[str, Any] ) -> int:
lowercase : Union[str, Any] = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf' )
# do_sample=False necessary for reproducibility
lowercase : Tuple = generator('Something there', do_sample=_A )
self.assertEqual(_A, [{'generated_text': ''}] )
| 371 |
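A minimal usage sketch matching the test above (the tiny random checkpoint produces meaningless text, but it exercises the full call path):

from transformers import pipeline

generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random')
# do_sample=False for reproducibility; the test above expects an empty string.
print(generator('Something there', do_sample=False))  # [{'generated_text': ''}]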
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'dandelin/vilt-b32-finetuned-vqa'
_lowerCamelCase = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
_lowerCamelCase = 'image_qa'
_lowerCamelCase = AutoProcessor
_lowerCamelCase = AutoModelForVisualQuestionAnswering
_lowerCamelCase = ['image', 'text']
_lowerCamelCase = ['text']
def __init__( self : List[str], *lowerCAmelCase : Optional[Any], **lowerCAmelCase : Optional[Any] ) -> str:
requires_backends(self, ['vision'] )
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : "Image", lowerCAmelCase : str ) -> Dict:
return self.pre_processor(lowerCAmelCase, lowerCAmelCase, return_tensors='pt' )
def lowercase ( self : List[Any], lowerCAmelCase : int ) -> Tuple:
with torch.no_grad():
return self.model(**lowerCAmelCase ).logits
def lowercase ( self : List[str], lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
lowercase : Optional[int] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 53 | 0 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma ( num : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x : float , z : float ):
    '''simple docstring'''
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
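For reference, the integral the two functions above evaluate is the standard gamma function:

\Gamma(\mathrm{num}) = \int_0^{\infty} x^{\,\mathrm{num}-1} e^{-x} \, dx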
| 77 | """simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = ["image_processor", "tokenizer"]
lowerCamelCase__ : Dict = "BlipImageProcessor"
lowerCamelCase__ : Union[str, Any] = "AutoTokenizer"
def __init__( self , a , a , a ) -> Optional[int]:
super().__init__(a , a )
# add QFormer tokenizer
lowercase__ : Dict = qformer_tokenizer
def __call__( self , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = False , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
lowercase__ : List[Any] = BatchFeature()
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
encoding.update(a )
lowercase__ : Optional[int] = self.qformer_tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
lowercase__ : List[str] = qformer_text_encoding.pop('input_ids' )
lowercase__ : Any = qformer_text_encoding.pop('attention_mask' )
if images is not None:
lowercase__ : List[Any] = self.image_processor(a , return_tensors=a )
encoding.update(a )
return encoding
def _UpperCAmelCase ( self , *a , **a ) -> List[str]:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> Tuple:
return self.tokenizer.decode(*a , **a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = self.tokenizer.model_input_names
lowercase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _UpperCAmelCase ( self , a , **a ) -> Optional[int]:
if os.path.isfile(a ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(a , exist_ok=a )
lowercase__ : int = os.path.join(a , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(a )
return super().save_pretrained(a , **a )
@classmethod
def _UpperCAmelCase ( cls , a , **a ) -> str:
lowercase__ : str = AutoTokenizer.from_pretrained(a , subfolder='qformer_tokenizer' )
lowercase__ : int = cls._get_arguments_from_pretrained(a , **a )
args.append(a )
return cls(*a )
| 77 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353 | import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.json'''}
__snake_case = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__snake_case = {'''mgp-str''': 27}
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , snake_case__ , snake_case__="[GO]" , snake_case__="[GO]" , snake_case__="[s]" , snake_case__="[GO]" , **snake_case__ ) -> Any:
'''simple docstring'''
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : int =json.load(snake_case__ )
UpperCAmelCase : List[str] ={v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =[]
for s in text:
            char_tokens.extend(s )
return char_tokens
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(snake_case__ ) )
return
UpperCAmelCase : List[Any] =os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '''\n''' )
return (vocab_file,)
| 78 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : int ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
_UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : DatasetInfo ) -> Any:
'''simple docstring'''
_UpperCAmelCase = str(__lowercase )
dataset_info.write_to_directory(__lowercase )
_UpperCAmelCase = DatasetInfo.from_directory(__lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__lowercase , "dataset_info.json" ) )
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_UpperCAmelCase = yaml.safe_dump(__lowercase )
_UpperCAmelCase = yaml.safe_load(__lowercase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = DatasetInfo()
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : DatasetInfosDict ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = str(__lowercase )
dataset_infos_dict.write_to_directory(__lowercase )
_UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_UpperCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowercase , "README.md" ) )
| 22 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
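# Worked example (a sketch): with scale_factor=8, height=768 gives 768 // 8**2 = 12
# latent rows and returns 12 * 8 = 96; height=767 rounds up to the same 96.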
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple = args.log_outputs
lowercase : List[str] = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
lowercase : Optional[Any] = load_metric('wer' )
lowercase : Dict = load_metric('cer' )
# compute metrics
lowercase : List[str] = wer.compute(references=result['target'] , predictions=result['prediction'] )
lowercase : Dict = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
lowercase : Optional[Any] = f'''WER: {wer_result}\nCER: {cer_result}'''
print(_UpperCAmelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(_UpperCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase : Optional[int] = f'''log_{dataset_id}_predictions.txt'''
lowercase : str = f'''log_{dataset_id}_targets.txt'''
with open(_UpperCAmelCase , 'w' ) as p, open(_UpperCAmelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(_UpperCAmelCase , _UpperCAmelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(_UpperCAmelCase , with_indices=_UpperCAmelCase )
def lowercase__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase : Optional[Any] = re.sub(_UpperCAmelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase : List[str] = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
lowercase : Tuple = ' '.join(text.split(_UpperCAmelCase ) )
return text
def lowercase__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase : List[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_UpperCAmelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase : List[str] = feature_extractor.sampling_rate
# resample audio
lowercase : int = dataset.cast_column('audio' , Audio(sampling_rate=_UpperCAmelCase ) )
# load eval pipeline
if args.device is None:
lowercase : Any = 0 if torch.cuda.is_available() else -1
lowercase : List[str] = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_UpperCAmelCase ):
lowercase : str = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase : List[str] = prediction['text']
lowercase : int = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
lowercase : str = dataset.map(_UpperCAmelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
_UpperCamelCase: str = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
_UpperCamelCase: Any = parser.parse_args()
main(args)
| 356 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCamelCase: Dict = logging.get_logger(__name__)
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
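# Worked example (a sketch): box=(15, 30, 60, 90) on a width=300, height=600 page
# normalizes to [50, 50, 200, 150] on the 0-1000 grid LayoutLM-style models expect.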
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None ) -> str:
'''simple docstring'''
lowercase : str = tesseract_config if tesseract_config is not None else ''
# apply OCR
lowercase : Tuple = to_pil_image(_UpperCAmelCase )
lowercase , lowercase : Union[str, Any] = pil_image.size
lowercase : Union[str, Any] = pytesseract.image_to_data(_UpperCAmelCase , lang=_UpperCAmelCase , output_type='dict' , config=_UpperCAmelCase )
lowercase , lowercase , lowercase , lowercase , lowercase : str = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowercase : str = [idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()]
lowercase : Dict = [word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Union[str, Any] = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : str = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Any = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
lowercase : Dict = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : int = []
for x, y, w, h in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase : Any = [x, y, x + w, y + h]
actual_boxes.append(_UpperCAmelCase )
# finally, normalize the bounding boxes
lowercase : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = ['pixel_values']
def __init__( self : str, lowerCAmelCase : bool = True, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCAmelCase : bool = True, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "", **lowerCAmelCase : List[Any], ) -> None:
super().__init__(**lowerCAmelCase )
lowercase : Optional[Any] = size if size is not None else {'height': 224, 'width': 224}
lowercase : List[Any] = get_size_dict(lowerCAmelCase )
lowercase : str = do_resize
lowercase : List[str] = size
lowercase : int = resample
lowercase : List[str] = apply_ocr
lowercase : str = ocr_lang
lowercase : Union[str, Any] = tesseract_config
def lowercase ( self : Optional[Any], lowerCAmelCase : np.ndarray, lowerCAmelCase : Dict[str, int], lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR, lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : int, ) -> np.ndarray:
lowercase : Optional[Any] = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase : Optional[int] = (size['height'], size['width'])
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Any, lowerCAmelCase : ImageInput, lowerCAmelCase : bool = None, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = None, lowerCAmelCase : bool = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[Union[str, TensorType]] = None, lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST, **lowerCAmelCase : List[Any], ) -> PIL.Image.Image:
lowercase : Any = do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] = size if size is not None else self.size
lowercase : Dict = get_size_dict(lowerCAmelCase )
lowercase : List[str] = resample if resample is not None else self.resample
lowercase : str = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : Optional[int] = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
lowercase : int = [to_numpy_array(lowerCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self, 'pytesseract' )
lowercase : str = []
lowercase : Dict = []
for image in images:
lowercase , lowercase : List[str] = apply_tesseract(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
words_batch.append(lowerCAmelCase )
boxes_batch.append(lowerCAmelCase )
if do_resize:
lowercase : str = [self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase : Any = [flip_channel_order(lowerCAmelCase ) for image in images]
lowercase : Dict = [to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowercase : Tuple = BatchFeature(data={'pixel_values': images}, tensor_type=lowerCAmelCase )
if apply_ocr:
lowercase : List[Any] = words_batch
lowercase : Tuple = boxes_batch
return data
| 53 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if ``fs`` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Move ``src`` to ``dst`` on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear the loop and thread references so a new fsspec event loop can be created after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 346 |
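A short sketch of how the helpers above compose; the bucket path and the in-memory protocol are illustrative only, not part of the original module:

import fsspec

path = extract_path_from_uri("s3://my-bucket/my-dataset")  # -> "my-bucket/my-dataset"
fs = fsspec.filesystem("memory")  # any registered fsspec protocol works here
if is_remote_filesystem(fs):
    print("dataset lives outside the local filesystem")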
def solution(limit: int = 1_000_000) -> int:
    """Count the reduced proper fractions with denominator <= limit, using a totient sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve phi over its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 346 | 1 |
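A minimal cross-check of the totient sieve above, counting coprime pairs directly; `brute_force` is a hypothetical helper added here only for illustration:

from math import gcd

def brute_force(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert brute_force(8) == solution(8) == 21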
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
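Example calls for the helper above (values chosen for illustration): passing 0 for the unknown quantity returns its name along with its computed value.

print(shear_stress(stress=25, tangential_force=100, area=0))    # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)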
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 47 | 0 |
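A tiny sanity check for largest_product above, using a 4x4 grid whose best run of four is the first column:

grid_4x4 = [
    [5, 1, 1, 1],
    [5, 1, 1, 1],
    [5, 1, 1, 1],
    [5, 1, 1, 1],
]
assert largest_product(grid_4x4) == 625  # 5 * 5 * 5 * 5 down the first column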
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and validation `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 34 |
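Assuming the training script above is saved as local_sgd.py (hypothetical filename), it can be launched through the Accelerate CLI, for example:

# accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8 --mixed_precision fp16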
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 288 | 0 |
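The tests above exercise the save/load round trip a typical CLIPProcessor user relies on; a minimal usage sketch (the hub model id is illustrative):

# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")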
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treat the curve as a collection of linear segments and sum the trapezium areas they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 100000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 360 |
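The trapezoidal rule above has error on the order of 1/steps**2 for smooth integrands; a quick check against the exact integral of x**2 over [0, 3] (exact value 9):

approx = trapezoidal_area(lambda x: x * x, 0, 3, steps=1_000)
assert abs(approx - 9.0) < 1e-3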
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image computed from an rgb image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 318 | 0 |
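A minimal check of dilation above: a single foreground pixel grows into a cross under the 3x3 cross-shaped structuring element.

import numpy as np

single_pixel = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(single_pixel, cross))  # the centre row and column become 1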
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    model: str
    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any
    attention: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and eventually return the corresponding tokens ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided tokens ids to readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the model on the provided input through the underlying pipeline."""
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 120 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
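A sketch of what the _LazyModule pattern above buys: importing the package is cheap, and the heavy torch-backed module is only loaded on first attribute access (the module path is assumed from this file's location):

import importlib

mega = importlib.import_module("transformers.models.mega")  # fast, no torch import yet
# mega.MegaModel  # first attribute access triggers the real import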
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the whole diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 152 | 0 |
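Example usage of the helpers above: pretty_print(3) prints a 3-row upper pyramid from floyd() followed by its mirror from reverse_floyd(), forming a diamond.

pretty_print(3)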
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over all contiguous subarrays of ``arr``."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 192 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 276 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,Image.Image)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''')
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
self.assertIsInstance(encoding.words ,lowerCAmelCase__)
self.assertIsInstance(encoding.boxes ,lowerCAmelCase__)
# Test batched
lowercase = image_processing(lowerCAmelCase__ ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ ,numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,np.ndarray)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
lowercase = image_processing(lowerCAmelCase__ ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCAmelCase__ ,torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ ,torch.Tensor)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
lowercase = image_processing(lowerCAmelCase__ ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def A__ ( self):
# with apply_OCR = True
lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''')
lowercase = Image.open(ds[0]['''file''']).convert('''RGB''')
lowercase = image_processing(lowerCAmelCase__ ,return_tensors='''pt''')
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4))
self.assertEqual(len(encoding.words) ,len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
lowercase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,lowerCAmelCase__)
self.assertListEqual(encoding.boxes ,lowerCAmelCase__)
# with apply_OCR = False
lowercase = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__)
lowercase = image_processing(lowerCAmelCase__ ,return_tensors='''pt''')
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4))
| 366 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 97 | 0 |
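The HRRN priority computed above is (waiting_time + burst_time) / burst_time, so long-waiting short jobs win; a quick check of the formula itself:

def response_ratio(waiting: float, burst: float) -> float:
    return (waiting + burst) / burst

assert response_ratio(6, 3) == 3.0  # waited 6 units on a 3-unit burst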
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def __UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> str:
pass
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(_UpperCamelCase )
UpperCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCAmelCase ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
def __UpperCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def __UpperCAmelCase ( self ) -> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : str = True
if model_class in get_values(_UpperCamelCase ):
continue
UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
UpperCAmelCase_ : str = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
UpperCAmelCase_ : int = model(**_UpperCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : int = True
if model_class in get_values(_UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
UpperCAmelCase_ : List[str] = model(**_UpperCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
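            # Added note: _config_zero_init shrinks every configured initializer range to
            # (near) zero, so freshly initialized weights should have a rounded mean of 0.0,
            # while gain-style parameters such as LayerNorm weights stay at 1.0; any other
            # value suggests a module whose init ignores the config (hence the backbone skip).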
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ) -> int:
pass
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __UpperCAmelCase ( self ) -> str:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[Any]:
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
| 29 | """simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCamelCase = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCamelCase = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__lowerCamelCase = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
    def _info(self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' ,id='sequence' ) ,id='references' ),
} ) ,codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] ,reference_urls=[
'https://github.com/m-popovic/chrF',
] ,)
    def _compute(self , predictions , references , char_order = CHRF.CHAR_ORDER , word_order = CHRF.WORD_ORDER , beta = CHRF.BETA , lowercase = False , whitespace = False , eps_smoothing = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
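# A minimal sketch added for illustration (not part of the metric itself): chrF is
# an F-beta score over character n-gram precision and recall, and the default
# beta=2 documented above weights recall twice as heavily as precision.
def _fbeta_sketch(precision: float, recall: float, beta: float = 2.0) -> float:
    if precision == 0.0 and recall == 0.0:
        return 0.0
    return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)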
| 221 | 0 |
def validate_initial_digits(credit_card_number: str ) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str ) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
# Sum up the remaining digits
for i in range(len(UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def validate_credit_card_number(credit_card_number: str ) -> bool:
    """simple docstring"""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"{error_message} it fails the Luhn check." )
        return False
    print(f"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
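    # Added worked example (illustrative; 4111111111111111 is the well-known Visa
    # test number): doubling every second digit from the right turns the leading 4
    # into 8 and seven of the 1s into 2s, giving 8 + 7 * 2 = 22; the untouched
    # eight 1s add 8, so the total is 30 and 30 % 10 == 0 -- the number is valid.
    assert luhn_validation("4111111111111111")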
| 265 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_A : Optional[Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
_A : Any = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path , enable_fusion=False ):
    """simple docstring"""
    model , model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer )//3}.linear." )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}." , f"_projection.linear{transformers_projection_layer}." )
        if "audio" in key and "qkv" in key:
            # split the fused qkv projection into separate query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
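# Quick shape sanity check added for illustration (hypothetical sizes, not part of
# the conversion script): a fused qkv weight of shape (3 * hidden, hidden) splits
# along dim 0 into three equal (hidden, hidden) blocks, mirroring the logic above.
_mixed_qkv_demo = torch.randn(12 , 4 )
_qkv_dim_demo = _mixed_qkv_demo.size(0 ) // 3
assert _mixed_qkv_demo[:_qkv_dim_demo].shape == _mixed_qkv_demo[2 * _qkv_dim_demo :].shape == (4, 4)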
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """simple docstring"""
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
_A : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 265 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : int = {"""do_clean_text""": False, """add_prefix_space""": False}
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self , tokenizer ):
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Tuple = "こんにちは、世界。 こんばんは、㔺界。"
UpperCAmelCase_ : Dict = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
UpperCAmelCase_ : int = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase_ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Optional[int] = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
UpperCAmelCase_ : Optional[int] = "こんにちは、、、、世界。こんばんは、、、、世界。"
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : List[Any] = "こんにちは、世界。"
UpperCAmelCase_ : List[Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = "こんにちは、世界。こんばんは、世界。😀"
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase_ : List[str] = tokenizer.encode("" , prefix_text=prefix_text + input_text )
UpperCAmelCase_ : str = tokenizer.encode(lowercase_ , prefix_text=lowercase_ )
UpperCAmelCase_ : List[Any] = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : str = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : List[str] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : Union[str, Any] = "こんにちは、世界。"
UpperCAmelCase_ : Union[str, Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Dict = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase_ : Any = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase_ : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase_ : Dict = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : Optional[Any] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : str = tokenizer(lowercase_ , prefix_text=lowercase_ ).token_type_ids
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : str = tokenizer.encode("あンいワ" )
UpperCAmelCase_ : List[Any] = tokenizer.encode("" , prefix_text="あンいワ" )
UpperCAmelCase_ : str = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : Tuple = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
UpperCAmelCase_ : Dict = tokenizer(lowercase_ , padding=lowercase_ )
UpperCAmelCase_ : int = tokenizer.batch_encode_plus(lowercase_ , padding=lowercase_ )
# fmt: off
        UpperCAmelCase_ : str = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
UpperCAmelCase_ : Optional[int] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase_ : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase_ )
self.assertListEqual(x_token.token_type_ids , lowercase_ )
self.assertListEqual(x_token.attention_mask , lowercase_ )
self.assertListEqual(x_token_a.input_ids , lowercase_ )
self.assertListEqual(x_token_a.token_type_ids , lowercase_ )
self.assertListEqual(x_token_a.attention_mask , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
# tokenizer has no padding token
pass
| 61 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    '''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=32 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=0.02 , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : str = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : str = pad_token_id
UpperCAmelCase_ : str = bos_token_id
UpperCAmelCase_ : List[Any] = initializer_range
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ : str = shift_tokens_right(lowercase_ , 1 , 2 )
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
UpperCAmelCase_ : Optional[int] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = 20
UpperCAmelCase_ : int = model_class_name(lowercase_ )
UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ : Any = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : int = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Optional[Any] = model.decode(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = 20
UpperCAmelCase_ : Any = model_class_name(lowercase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : int = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase_ : Dict = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ )
UpperCAmelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 99
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ : Any = input_ids.shape[0]
UpperCAmelCase_ : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._get_config_and_data()
UpperCAmelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
UpperCAmelCase_ : Optional[int] = lm_model(input_ids=lowercase_ )
UpperCAmelCase_ : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
UpperCAmelCase_ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase_ : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase_ : Tuple = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
UpperCAmelCase_ : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase_ : Dict = shift_tokens_right(lowercase_ , 1 , 2 )
UpperCAmelCase_ : Tuple = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
UpperCAmelCase_ : Optional[Any] = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
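        # Added note: shift_tokens_right prepends the decoder start token (2 here) and
        # drops the last position of each row, so a row ending in pad (token 1) loses
        # exactly one pad -- which is what the n_pad_before - 1 comparison above checks.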
@require_flax
class A_ (lowercase__ ,unittest.TestCase ,lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : List[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = model_class(lowercase_ )
@jax.jit
def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : List[Any] = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase_ : int = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ , lowercase_ , lowercase_ ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : str = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : List[Any] = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 61 | 1 |
def largest_square_area_in_matrix_top_down_approach(rows: int , cols: int , mat: list[list[int]] ) -> int:
    """simple docstring"""
    def update_area_of_max_square(row: int , col: int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int , cols: int , mat: list[list[int]] ) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row: int , col: int , dp_array: list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int , cols: int , mat: list[list[int]] ) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
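# The recurrence implemented above, stated explicitly (added note): dp[row][col]
# is the side of the largest all-ones square whose top-left corner is (row, col),
#   dp[row][col] = 1 + min(dp[row][col+1], dp[row+1][col+1], dp[row+1][col])  if mat[row][col] == 1
#   dp[row][col] = 0                                                          otherwise,
# with an extra zero row and column of padding so the borders need no special casing.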
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int , cols: int , mat: list[list[int]] ) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so the next iteration reads the row below rather than an alias
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
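    # Added illustrative cross-check (hypothetical input): the largest all-ones
    # square in this 3x3 matrix is a 2x2 block, e.g. in its top-left corner.
    _demo_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert largest_square_area_in_matrix_bottom_up(3, 3, _demo_mat) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _demo_mat) == 2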
| 71 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = LxmertTokenizer
__A = LxmertTokenizerFast
__A = True
__A = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
def UpperCamelCase_ (self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
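        # Added note: the "##" prefix marks WordPiece continuation pieces, so with the
        # toy vocab defined in setUp, "unwanted" splits into "un" + "##want" + "##ed",
        # and the expected ids [7, 4, 5, 10, 8, 9] are simply indices into that vocab list.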
def UpperCamelCase_ (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 71 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = IFPipeline
a__ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase_ (self ) -> Tuple:
return self._get_dummy_components()
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> Any:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowerCAmelCase_ (self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ (self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ (self ) -> List[str]:
self._test_save_load_local()
def lowerCAmelCase_ (self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCAmelCase_ (self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> List[str]:
# if
__UpperCAmelCase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
__UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowercase__ , tokenizer=lowercase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
__UpperCAmelCase , __UpperCAmelCase = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__UpperCAmelCase = None
__UpperCAmelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components )
__UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components )
__UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , num_inference_steps=2 , generator=lowercase__ , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , image=lowercase__ , generator=lowercase__ , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , image=lowercase__ , num_inference_steps=2 , generator=lowercase__ , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , image=lowercase__ , original_image=lowercase__ , generator=lowercase__ , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowercase__ )
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , image=lowercase__ , mask_image=lowercase__ , num_inference_steps=2 , generator=lowercase__ , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowercase__ )
__UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowercase__ )
__UpperCAmelCase = pipe_a(
prompt_embeds=lowercase__ , negative_prompt_embeds=lowercase__ , image=lowercase__ , mask_image=lowercase__ , original_image=lowercase__ , generator=lowercase__ , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
__UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def _start_torch_memory_measurement():
    '''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
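# Added note: resetting the allocator's max/peak statistics before each stage makes
# torch.cuda.max_memory_allocated() report only that stage's peak usage, which is
# what the `mem_bytes < ...` assertions in the tests above are bounding.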
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
    '''simple docstring'''
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(',' )
    for rule_str in rule_list[:-1]:
        steps_str , value_str = rule_str.split(':' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
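# Usage note (added; a reading of the parsing code above): `step_rules` is a list of
# "step:multiple" pairs plus a trailing default, e.g. step_rules="10:1.0,20:0.1,0.01"
# applies lr * 1.0 for steps below 10, lr * 0.1 for steps 10-19, and lr * 0.01 afterwards.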
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
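# Shape note (added): after warmup, the multiplier above follows
#   0.5 * (1 + cos(pi * 2 * num_cycles * progress))  with progress in [0, 1],
# so the default num_cycles=0.5 traces half a cosine period, decaying smoothly
# from 1 at the end of warmup to 0 at num_training_steps.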
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1):
    """Create a schedule with a polynomial decay from the optimizer's initial lr to `lr_end` after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
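# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# A minimal usage demo of the helpers above on a throwaway optimizer: the
# multiplier warms up linearly for 10 steps, then follows the cosine decay.
# The hyper-parameter values (warmup=10, total=100, lr=0.1) are arbitrary.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    opt = torch.optim.SGD(params, lr=0.1)
    sched = get_scheduler("cosine", opt, num_warmup_steps=10, num_training_steps=100)
    for step in range(100):
        opt.step()
        sched.step()
        if step in (0, 9, 54, 99):
            # lr rises toward 0.1 by step 9, is ~0.05 at the halfway point, and ~0 at the end
            print(step, round(sched.get_last_lr()[0], 5))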
| 333 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
__a = "sshleifer/student_marian_en_ro_6_1"
__a = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
"""simple docstring"""
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
        last_step_stats = eval_metrics[-1]
        assert isinstance(last_step_stats["eval_bleu"], float)
        assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCamelCase ( self : List[Any] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ )
@require_torch_multi_gpu
def lowerCamelCase ( self : Tuple ):
self.run_seqaseq_quick(distributed=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : int ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : List[str] ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase ( self : str ):
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=snake_case_ )
@require_apex
@require_torch_gpu
def lowerCamelCase ( self : str ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
    # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
    # 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
    # test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id: str):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights,
        # which don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        ".split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
return output_dir
| 43 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
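# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# A tiny worked example of the binary-search idea: per row (sorted descending),
# find the first negative index; everything from there to the row's end is negative.
demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert [find_negative_index(row) for row in demo_grid] == [3, 3, 2, 0]
assert count_negatives_binary_search(demo_grid) == 8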
| 43 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    # three-pointer merge: the next ugly number is the smallest of the current
    # candidates 2*ugly[i2], 3*ugly[i3] and 5*ugly[i5]
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
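# ---- Editor's illustrative note (added for clarity; not from the original source) ----
# Sanity check: the sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (7 is
# skipped because it has a prime factor other than 2, 3 and 5).
assert ugly_numbers(10) == 12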
| 260 | """simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.')
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"""The solutions are: {solution_1} and {solution_2}""")
if __name__ == "__main__":
main()
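# ---- Editor's illustrative note (added for clarity; not from the original source) ----
# With a negative discriminant the roots come back complex, e.g.
# x^2 + 2x + 5 = 0 has delta = 4 - 20 = -16 and roots -1 +/- 2j:
assert quadratic_roots(a=1, b=2, c=5) == ((-1 + 2j), (-1 - 2j))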
| 150 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ], )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ], )
        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets
        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ], )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets
        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
    def test_small_model_tf(self):
pass
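# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# Running the small checkpoint used above outside of unittest. Requires network
# access to download "anton-l/wav2vec2-random-tiny-classifier" from the Hub; the
# pipeline accepts a raw waveform as a numpy array, just like the tests above.
if __name__ == "__main__":
    clf = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(clf(np.ones((8_000,)), top_k=2))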
| 173 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
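# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# The essence of LocalSGD, stripped of Accelerate: each replica takes several local
# optimizer steps, then parameters are averaged across replicas. The two "workers"
# here are simulated in one process so the sketch runs anywhere; in real training
# the averaging would be an all_reduce across processes.
import copy

def average_parameters(models: "list[torch.nn.Module]") -> None:
    # average the replicas' parameter tensors in place
    for params in zip(*(m.parameters() for m in models)):
        mean = torch.stack([p.data for p in params]).mean(dim=0)
        for p in params:
            p.data.copy_(mean)

if __name__ == "__main__":
    torch.manual_seed(0)
    base = torch.nn.Linear(4, 1)
    workers = [copy.deepcopy(base) for _ in range(2)]
    opts = [torch.optim.SGD(w.parameters(), lr=0.1) for w in workers]
    for step in range(1, 9):
        for w, opt in zip(workers, opts):
            x = torch.randn(8, 4)
            loss = (w(x) ** 2).mean()
            opt.zero_grad()
            loss.backward()
            opt.step()
        if step % 4 == 0:  # local_sgd_steps = 4: synchronize every 4 local steps
            average_parameters(workers)
    print(workers[0].weight.data)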
| 173 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : int = (
AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
if isinstance(lowercase_ , lowercase_ )
else mask_token
)
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Any = do_lower_case
UpperCAmelCase_ : Tuple = remove_space
UpperCAmelCase_ : str = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : List[Any] = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
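# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# What the two pair-encoding helpers above compute, written out for a toy pair of
# sequences. The ids 101/102 for [CLS]/[SEP] are hypothetical placeholders; a pair
# (A, B) is laid out as [CLS] A [SEP] B [SEP], with token_type_ids marking segment B.
def demo_pair_layout(token_ids_0, token_ids_1, cls_id=101, sep_id=102):
    input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
    token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
    return input_ids, token_type_ids

assert demo_pair_layout([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])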
| 61 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"""the value at {value} is {summ}""")
main()
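# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# The same Newton forward-difference interpolation without the interactive input()
# calls, on made-up sample data f(x) = x^2 at x = 1..4; interpolating at 2.5
# should give exactly 6.25.
def demo_interpolation() -> float:
    x = [1.0, 2.0, 3.0, 4.0]
    y = [[1.0, 0.0, 0.0, 0.0], [4.0, 0.0, 0.0, 0.0], [9.0, 0.0, 0.0, 0.0], [16.0, 0.0, 0.0, 0.0]]
    n = len(x)
    u = (2.5 - x[0]) / (x[1] - x[0])
    # build the forward-difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ

assert demo_interpolation() == 6.25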
| 61 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # pads each sequence (or (x, 2)-shaped span list, when padding_value is a tuple)
    # to sequence_length, on the right or the left
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("""P"""):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="""pt""" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
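# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# The label-padding rule used above, in isolation: labels are padded to the batch
# sequence length with label_pad_token_id (-100, which the loss ignores), on the
# right or the left depending on the tokenizer's padding side.
def pad_labels(labels, sequence_length, padding_side="right", pad_id=-100):
    if padding_side == "right":
        return [lab + [pad_id] * (sequence_length - len(lab)) for lab in labels]
    return [[pad_id] * (sequence_length - len(lab)) + lab for lab in labels]

assert pad_labels([[1, 2], [3]], sequence_length=4) == [[1, 2, -100, -100], [3, -100, -100, -100]]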
| 367 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
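# ---- Editor's illustrative note (added for clarity; not from the original source) ----
# All three variants count ordered sequences, not subsets: with array=[1, 2, 5] and
# target=5 the nine sequences are 1+1+1+1+1, 1+1+1+2 (4 orderings), 1+2+2
# (3 orderings) and 5, so each implementation returns 9.
assert combination_sum_iv(3, [1, 2, 5], 5) == combination_sum_iv_dp_array(3, [1, 2, 5], 5) == combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9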
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 59 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''resnet'''
    layer_types = ['''basic''', '''bottleneck''']
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1_024, 2_048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
        return 1E-3
| 6 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
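# ---- Editor's illustrative note (added for clarity; not from the original source) ----
# e.g. with the default scale_factor of 8, a requested 768x768 image maps to
# downscale_height_and_width(768, 768) == (96, 96); a non-multiple such as 768x770
# is rounded up to the next multiple: downscale_height_and_width(768, 770) == (96, 104).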
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 94 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
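# ---- Editor's illustrative sketch (added for clarity; not from the original source) ----
# compress_data in isolation, without touching the filesystem: the lexicon starts
# as {'0': '0', '1': '1'} and grows as new phrases are seen; for long repetitive
# inputs the emitted code string becomes shorter than the input. Run by hand:
def _demo_compress_data() -> None:
    bits = "0010110100101101"
    out = compress_data(bits)
    print(f"{len(bits)} input bits -> {len(out)} output bits: {out}")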
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 368 |
def cramers_rule_2x2(equation_1: list[int], equation_2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation_1) == len(equation_2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation_1[0] == equation_1[1] == equation_2[0] == equation_2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation_1
    a2, b2, c2 = equation_2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 130 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :Dict , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :int=0.2 , lowercase_ :Any=0.2 )-> Optional[Any]:
A__ = bp_numa
A__ = bp_numa
A__ = bp_numa
A__ = conva_get[:2]
A__ = conva_get[2]
A__ = size_pa
A__ = rate_w
A__ = rate_t
A__ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = -2 * np.random.rand(self.conva[1] ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Any )-> Tuple:
A__ = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(snake_case__ , "wb" ) as f:
pickle.dump(snake_case__ , snake_case__ )
print(F"Model saved: {save_path}" )
@classmethod
def UpperCAmelCase_ ( cls :Dict , lowercase_ :str )-> Union[str, Any]:
with open(snake_case__ , "rb" ) as f:
A__ = pickle.load(snake_case__ ) # noqa: S301
A__ = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
A__ = model_dic.get("size_pooling1" )
A__ = model_dic.get("num_bp1" )
A__ = model_dic.get("num_bp2" )
A__ = model_dic.get("num_bp3" )
A__ = model_dic.get("rate_weight" )
A__ = model_dic.get("rate_thre" )
# create model instance
A__ = CNN(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# modify model parameter
A__ = model_dic.get("w_conv1" )
A__ = model_dic.get("wkj" )
A__ = model_dic.get("vji" )
A__ = model_dic.get("thre_conv1" )
A__ = model_dic.get("thre_bp2" )
A__ = model_dic.get("thre_bp3" )
return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Optional[int] )-> List[str]:
return round(snake_case__ , 3 )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :int , lowercase_ :Optional[int] )-> Any:
A__ = convs[0]
A__ = convs[1]
A__ = np.shape(snake_case__ )[0]
# get the data slice of original image data, data_focus
A__ = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
A__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case__ )
        # calculate the feature map of every single kernel, and save them as a list of matrices
A__ = []
A__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case__ ):
A__ = []
for i_focus in range(len(snake_case__ ) ):
A__ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case__ ) )
A__ = np.asmatrix(snake_case__ ).reshape(
snake_case__ , snake_case__ )
data_featuremap.append(snake_case__ )
        # expanding the data slice to one dimension
A__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case__ ) )
A__ = np.asarray(snake_case__ )
return focus_list, data_featuremap
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Dict , lowercase_ :List[str] , lowercase_ :List[str]="average_pool" )-> List[str]:
A__ = len(featuremaps[0] )
A__ = int(size_map / size_pooling )
A__ = []
for i_map in range(len(snake_case__ ) ):
A__ = featuremaps[i_map]
A__ = []
for i_focus in range(0 , snake_case__ , snake_case__ ):
for j_focus in range(0 , snake_case__ , snake_case__ ):
A__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case__ ) )
A__ = np.asmatrix(snake_case__ ).reshape(snake_case__ , snake_case__ )
featuremap_pooled.append(snake_case__ )
return featuremap_pooled
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] )-> List[str]:
A__ = []
for i in range(len(snake_case__ ) ):
A__ = np.shape(data[i] )
A__ = data[i].reshape(1 , shapes[0] * shapes[1] )
A__ = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case__ )
A__ = np.asarray(snake_case__ )
return data_expanded
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :List[str] )-> Optional[int]:
A__ = np.asarray(snake_case__ )
A__ = np.shape(snake_case__ )
A__ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] )-> Tuple:
A__ = []
A__ = 0
for i_map in range(snake_case__ ):
A__ = np.ones((size_map, size_map) )
for i in range(0 , snake_case__ , snake_case__ ):
for j in range(0 , snake_case__ , snake_case__ ):
A__ = pd_pool[
i_pool
]
A__ = i_pool + 1
A__ = np.multiply(
snake_case__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case__ )
return pd_all
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :List[Any] , lowercase_ :Any , lowercase_ :Optional[int] , lowercase_ :Any=bool )-> str:
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(snake_case__ )) )
print((" - - Shape: Teach_Data ", np.shape(snake_case__ )) )
A__ = 0
A__ = []
A__ = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
A__ = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(snake_case__ ) ):
# print('------------Learning Image: %d--------------'%p)
A__ = np.asmatrix(datas_train[p] )
A__ = np.asarray(datas_teach[p] )
A__ = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(snake_case__ , self.size_poolinga )
A__ = np.shape(snake_case__ )
A__ = self._expand(snake_case__ )
A__ = data_bp_input
A__ = np.dot(snake_case__ , self.vji.T ) - self.thre_bpa
A__ = self.sig(snake_case__ )
A__ = np.dot(snake_case__ , self.wkj.T ) - self.thre_bpa
A__ = self.sig(snake_case__ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
A__ = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case__ , (1 - bp_outa) ) )
A__ = np.multiply(
np.dot(snake_case__ , self.wkj ) , np.multiply(snake_case__ , (1 - bp_outa) ) )
A__ = np.dot(snake_case__ , self.vji )
A__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
A__ = pd_conva_pooled.T.getA().tolist()
A__ = self._calculate_gradient_from_pool(
snake_case__ , snake_case__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A__ = self._expand_mat(pd_conva_all[k_conv] )
A__ = self.rate_weight * np.dot(snake_case__ , snake_case__ )
A__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A__ = self.thre_bpa - pd_k_all * self.rate_thre
A__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the error of a single image and accumulate it
A__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A__ = rp + 1
A__ = error_count / patterns
all_mse.append(snake_case__ )
def draw_error():
A__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case__ , "+-" )
plt.plot(snake_case__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(snake_case__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> Optional[int]:
A__ = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(snake_case__ )) )
for p in range(len(snake_case__ ) ):
A__ = np.asmatrix(datas_test[p] )
A__ = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(snake_case__ , self.size_poolinga )
A__ = self._expand(snake_case__ )
A__ = data_bp_input
A__ = bp_outa * self.vji.T - self.thre_bpa
A__ = self.sig(snake_case__ )
A__ = bp_outa * self.wkj.T - self.thre_bpa
A__ = self.sig(snake_case__ )
produce_out.extend(bp_outa.getA().tolist() )
A__ = [list(map(self.do_round , snake_case__ ) ) for each in produce_out]
return np.asarray(snake_case__ )
def UpperCAmelCase_ ( self :Dict , lowercase_ :Dict )-> Dict:
A__ = np.asmatrix(snake_case__ )
A__ = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(snake_case__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 237 |
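# For orientation, a hedged standalone sketch of the valid convolution-with-stride step
# that the class's convolute method performs; `conv2d_valid` and the toy data are
# invented for illustration, under the assumption of square images and kernels.
import numpy as np

def conv2d_valid(image, kernel, stride=1):
    # slide `kernel` over `image` with no padding and return the feature map
    size_data, size_conv = image.shape[0], kernel.shape[0]
    size_map = (size_data - size_conv) // stride + 1
    out = np.zeros((size_map, size_map))
    for i in range(size_map):
        for j in range(size_map):
            patch = image[i * stride : i * stride + size_conv,
                          j * stride : j * stride + size_conv]
            out[i, j] = np.sum(patch * kernel)
    return out

# a 5x5 image convolved with a 3x3 averaging kernel yields a 3x3 feature map
feature = conv2d_valid(np.arange(25.0).reshape(5, 5), np.ones((3, 3)) / 9.0)
assert feature.shape == (3, 3)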
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 59 | 0 |
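# A minimal, runnable illustration of the monkeypatch-and-fixture mocking pattern used
# in the test above; the namespace object and `FakeHub` are invented stand-ins, not
# datasets APIs.
import types

import pytest

class FakeHub:
    def list_metrics(self):
        return ["accuracy", "mse"]

@pytest.fixture
def fake_module(monkeypatch):
    mod = types.SimpleNamespace(hub=object())
    monkeypatch.setattr(mod, "hub", FakeHub())  # automatically undone after each test
    return mod

def test_list_metrics(fake_module):
    assert fake_module.hub.list_metrics() == ["accuracy", "mse"]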
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
def __init__( self: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple=0.2 , _lowerCAmelCase: Union[str, Any]=0.2 ):
lowercase :Any = bp_numa
lowercase :str = bp_numa
lowercase :Dict = bp_numa
lowercase :Dict = conva_get[:2]
lowercase :Any = conva_get[2]
lowercase :Any = size_pa
lowercase :Tuple = rate_w
lowercase :Optional[Any] = rate_t
lowercase :int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase :Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase :Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase :Tuple = -2 * np.random.rand(self.conva[1] ) + 1
lowercase :Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
lowercase :List[str] = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: Optional[int] ):
# save model dict with pickle
lowercase :Optional[Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(_lowerCAmelCase , "wb" ) as f:
pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
print(F"Model saved: {save_path}" )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: int , _lowerCAmelCase: List[Any] ):
# read saved model
with open(_lowerCAmelCase , "rb" ) as f:
lowercase :List[str] = pickle.load(_lowerCAmelCase ) # noqa: S301
lowercase :Dict = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowercase :Union[str, Any] = model_dic.get("size_pooling1" )
lowercase :Optional[Any] = model_dic.get("num_bp1" )
lowercase :List[str] = model_dic.get("num_bp2" )
lowercase :List[Any] = model_dic.get("num_bp3" )
lowercase :Tuple = model_dic.get("rate_weight" )
lowercase :Optional[int] = model_dic.get("rate_thre" )
# create model instance
lowercase :Optional[int] = CNN(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# modify model parameter
lowercase :Tuple = model_dic.get("w_conv1" )
lowercase :Tuple = model_dic.get("wkj" )
lowercase :Optional[Any] = model_dic.get("vji" )
lowercase :str = model_dic.get("thre_conv1" )
lowercase :str = model_dic.get("thre_bp2" )
lowercase :Tuple = model_dic.get("thre_bp3" )
return conv_ins
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: List[Any] ):
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: int ):
return round(_lowerCAmelCase , 3 )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: Dict , _lowerCAmelCase: Union[str, Any] ):
# convolution process
lowercase :Optional[Any] = convs[0]
lowercase :List[Any] = convs[1]
lowercase :Optional[Any] = np.shape(_lowerCAmelCase )[0]
# get the data slice of original image data, data_focus
lowercase :Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowerCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowerCAmelCase ):
lowercase :int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowerCAmelCase )
        # calculate the feature map of every single kernel, and save them as a list of matrices
lowercase :int = []
lowercase :Optional[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowerCAmelCase ):
lowercase :Tuple = []
for i_focus in range(len(_lowerCAmelCase ) ):
lowercase :Union[str, Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowerCAmelCase ) )
lowercase :List[Any] = np.asmatrix(_lowerCAmelCase ).reshape(
_lowerCAmelCase , _lowerCAmelCase )
data_featuremap.append(_lowerCAmelCase )
        # expanding the data slice to one dimension
lowercase :int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowerCAmelCase ) )
lowercase :List[Any] = np.asarray(_lowerCAmelCase )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str="average_pool" ):
# pooling process
lowercase :Any = len(featuremaps[0] )
lowercase :Optional[int] = int(size_map / size_pooling )
lowercase :Any = []
for i_map in range(len(_lowerCAmelCase ) ):
lowercase :Union[str, Any] = featuremaps[i_map]
lowercase :int = []
for i_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
for j_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
lowercase :Optional[int] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowerCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowerCAmelCase ) )
lowercase :str = np.asmatrix(_lowerCAmelCase ).reshape(_lowerCAmelCase , _lowerCAmelCase )
featuremap_pooled.append(_lowerCAmelCase )
return featuremap_pooled
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: Optional[int] ):
        # expand three-dimensional data into a one-dimensional list
lowercase :Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
lowercase :str = np.shape(data[i] )
lowercase :Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase :str = data_listed.getA().tolist()[0]
data_expanded.extend(_lowerCAmelCase )
lowercase :int = np.asarray(_lowerCAmelCase )
return data_expanded
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: List[str] ):
        # expand a matrix into a one-dimensional list
lowercase :int = np.asarray(_lowerCAmelCase )
lowercase :List[str] = np.shape(_lowerCAmelCase )
lowercase :Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple , _lowerCAmelCase: List[Any] ):
lowercase :List[Any] = []
lowercase :Tuple = 0
for i_map in range(_lowerCAmelCase ):
lowercase :Dict = np.ones((size_map, size_map) )
for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
for j in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
lowercase :Dict = pd_pool[
i_pool
]
lowercase :Dict = i_pool + 1
lowercase :Dict = np.multiply(
_lowerCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowerCAmelCase )
return pd_all
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[int]=bool ):
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(_lowerCAmelCase )) )
print((" - - Shape: Teach_Data ", np.shape(_lowerCAmelCase )) )
lowercase :str = 0
lowercase :Tuple = []
lowercase :Tuple = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
lowercase :Dict = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(_lowerCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase :int = np.asmatrix(datas_train[p] )
lowercase :Dict = np.asarray(datas_teach[p] )
lowercase :Union[str, Any] = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase :Tuple = self.pooling(_lowerCAmelCase , self.size_poolinga )
lowercase :Optional[int] = np.shape(_lowerCAmelCase )
lowercase :Optional[Any] = self._expand(_lowerCAmelCase )
lowercase :Optional[Any] = data_bp_input
lowercase :List[Any] = np.dot(_lowerCAmelCase , self.vji.T ) - self.thre_bpa
lowercase :List[Any] = self.sig(_lowerCAmelCase )
lowercase :List[Any] = np.dot(_lowerCAmelCase , self.wkj.T ) - self.thre_bpa
lowercase :int = self.sig(_lowerCAmelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase :Dict = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) )
lowercase :Tuple = np.multiply(
np.dot(_lowerCAmelCase , self.wkj ) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) )
lowercase :Any = np.dot(_lowerCAmelCase , self.vji )
lowercase :int = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase :Optional[int] = pd_conva_pooled.T.getA().tolist()
lowercase :str = self._calculate_gradient_from_pool(
_lowerCAmelCase , _lowerCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase :Union[str, Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase :Dict = self.rate_weight * np.dot(_lowerCAmelCase , _lowerCAmelCase )
lowercase :List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase :Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase :List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase :Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase :int = self.thre_bpa - pd_k_all * self.rate_thre
lowercase :Dict = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the error of a single image and accumulate it
lowercase :str = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase :str = rp + 1
lowercase :Any = error_count / patterns
all_mse.append(_lowerCAmelCase )
def draw_error():
lowercase :int = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowerCAmelCase , "+-" )
plt.plot(_lowerCAmelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(_lowerCAmelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Optional[int] ):
# model predict
lowercase :Optional[Any] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(_lowerCAmelCase )) )
for p in range(len(_lowerCAmelCase ) ):
lowercase :Union[str, Any] = np.asmatrix(datas_test[p] )
lowercase :Union[str, Any] = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase :Union[str, Any] = self.pooling(_lowerCAmelCase , self.size_poolinga )
lowercase :str = self._expand(_lowerCAmelCase )
lowercase :Tuple = data_bp_input
lowercase :Optional[int] = bp_outa * self.vji.T - self.thre_bpa
lowercase :Optional[int] = self.sig(_lowerCAmelCase )
lowercase :Tuple = bp_outa * self.wkj.T - self.thre_bpa
lowercase :str = self.sig(_lowerCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase :List[str] = [list(map(self.do_round , _lowerCAmelCase ) ) for each in produce_out]
return np.asarray(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: Union[str, Any] ):
        # return the image data after the convolution process so it can be inspected
lowercase :Union[str, Any] = np.asmatrix(_lowerCAmelCase )
lowercase :Any = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase :Optional[int] = self.pooling(_lowerCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 359 |
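# The save/read methods above persist the model as a plain dict through pickle; a
# hedged standalone sketch of that round-trip with invented helper names
# (`save_params`, `load_params`).
import os
import pickle
import tempfile

def save_params(params, save_path):
    with open(save_path, "wb") as f:
        pickle.dump(params, f)
    print(f"Model saved: {save_path}")

def load_params(save_path):
    with open(save_path, "rb") as f:
        return pickle.load(f)  # noqa: S301 - only unpickle files you trust

path = os.path.join(tempfile.mkdtemp(), "model.pkl")
save_params({"rate_weight": 0.2, "rate_thre": 0.2}, path)
assert load_params(path) == {"rate_weight": 0.2, "rate_thre": 0.2}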
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Save a randomly initialized version of a model from its pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 158 | 0 |
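# `fire.Fire` maps a callable's signature onto command-line arguments, which is how the
# script above exposes its converter. A tiny hedged sketch (the `greet` function and
# its flags are invented, not from the source):
import fire

def greet(name, excited=False):
    # invoked as: python this_script.py World --excited
    return f"Hello, {name}{'!' if excited else '.'}"

if __name__ == "__main__":
    fire.Fire(greet)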
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A__ = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, """w""", encoding="""utf-8""" ) as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
A__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 |
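# A hedged sketch of the task -> head-size lookup the converter relies on; the dict is
# abridged here and `pick_num_labels` is an invented helper, not part of the script.
GLUE_TASKS_NUM_LABELS = {"cola": 2, "mnli": 3, "mrpc": 2, "sts-b": 1}

def pick_num_labels(finetuning_task):
    task = (finetuning_task or "").lower()
    if task in GLUE_TASKS_NUM_LABELS:
        return GLUE_TASKS_NUM_LABELS[task]  # sequence-classification head size
    return None  # squad -> span heads; otherwise a plain LM head, no label count

assert pick_num_labels("STS-B") == 1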
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base=80_08_00, degree=80_08_00):
    """Count hybrid-integers p^q * q^p (p < q prime) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
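# The log2 trick above works because p**q * q**p <= base**degree is equivalent to
# q*log2(p) + p*log2(q) <= degree*log2(base). A hedged brute-force cross-check of the
# two-pointer count, reusing `calculate_prime_numbers` and `solution` defined above:
from math import log2

def brute_force(base, degree):
    upper_bound = degree * log2(base)
    primes = calculate_prime_numbers(int(upper_bound))
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if q * log2(p) + p * log2(q) <= upper_bound:
                count += 1
    return count

assert brute_force(800, 800) == solution(800, 800)  # the problem statement quotes 10790 here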
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 325 |
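# A hedged, minimal illustration of the lazy-import pattern above: attribute access
# triggers the real import and caches the result. `LazyLoader` and `demo` are invented
# names; the real _LazyModule also handles TYPE_CHECKING and module specs.
import importlib
import types

class LazyLoader(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):  # only called when normal lookup fails
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache so the next access is a plain lookup
                return value
        raise AttributeError(attr)

demo = LazyLoader("demo", {"json": ["dumps", "loads"]})
assert demo.dumps({"a": 1}) == '{"a": 1}'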
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
    _SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite) | 325 | 1 |
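# A hedged mini-demo of the "# Copied from" marker that the checker above parses; the
# sample line and class names are invented.
import re

_re_copy = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')

sample = "    # Copied from diffusers.models.attention.BasicBlock with BasicBlock->MyBlock"
indent, object_name, replace_pattern = _re_copy.search(sample).groups()
assert indent == "    "
assert object_name == "models.attention.BasicBlock"
assert replace_pattern == "with BasicBlock->MyBlock"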
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Optional[Any] = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class _UpperCAmelCase ( lowerCAmelCase_):
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ):
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class _UpperCAmelCase ( lowerCAmelCase_):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[int] = None ):
snake_case_ : Any = max_length
snake_case_ : Optional[int] = max_position_embeddings
@add_start_docstrings(_snake_case )
def __call__( self : Dict , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Any ):
snake_case_ : Tuple = input_ids.shape[-1]
snake_case_ : Dict = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class _UpperCAmelCase ( lowerCAmelCase_):
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ):
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
'''with `max_length = start_length + max_new_tokens` instead.''' , _snake_case , )
snake_case_ : List[str] = start_length
snake_case_ : List[str] = max_new_tokens
snake_case_ : Any = start_length + max_new_tokens
@add_start_docstrings(_snake_case )
def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : int ):
return input_ids.shape[-1] >= self.max_length
class _UpperCAmelCase ( lowerCAmelCase_):
def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ):
snake_case_ : List[str] = max_time
snake_case_ : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_snake_case )
def __call__( self : List[str] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Tuple ):
return time.time() - self.initial_timestamp > self.max_time
class _UpperCAmelCase ( lowerCAmelCase_):
@add_start_docstrings(_snake_case )
def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ):
return any(criteria(_snake_case , _snake_case ) for criteria in self )
@property
def _snake_case ( self : Optional[int] ):
for stopping_criterium in self:
if isinstance(_snake_case , _snake_case ):
return stopping_criterium.max_length
elif isinstance(_snake_case , _snake_case ):
return stopping_criterium.max_length
return None
def __lowercase ( _a , _a ):
snake_case_ : Optional[Any] = stopping_criteria.max_length
snake_case_ : int = deepcopy(__lowerCAmelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , __lowerCAmelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__lowerCAmelCase ) )
return new_stopping_criteria
| 264 |
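# The classes above mirror transformers' public stopping criteria; a hedged usage
# sketch via the library's exported names (tensor shapes and values are illustrative).
import torch
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=60.0)])
input_ids = torch.ones((1, 8), dtype=torch.long)  # one sequence, already 8 tokens long
scores = torch.zeros((1, 100))  # dummy next-token scores
assert criteria(input_ids, scores)  # max_length reached, so generation should stop
assert criteria.max_length == 8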
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase_ )
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
A__ : ClassVar[Features] = Features({'''audio''': Audio()} )
A__ : ClassVar[Features] = Features({'''labels''': ClassLabel} )
A__ : str = "audio"
A__ : str = "labels"
def snake_case_ ( self : List[Any] , _snake_case : List[str] ):
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _snake_case ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
__lowercase : Optional[Any] = copy.deepcopy(self )
__lowercase : Optional[int] = self.label_schema.copy()
__lowercase : Tuple = features[self.label_column]
__lowercase : Optional[Any] = label_schema
return task_template
@property
def snake_case_ ( self : Optional[Any] ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 156 | 0 |
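# A hedged sketch of what aligning the template above with a dataset's features
# produces: the generic ClassLabel placeholder is swapped for the dataset's concrete
# labels. The feature names here are illustrative.
from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
label_schema = Features({"labels": features["labels"]})  # what the copied template carries
assert label_schema["labels"].names == ["cat", "dog"]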
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """spiece.model"""}
__snake_case ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
__snake_case ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__snake_case ="""▁"""
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int="[CLS]" , UpperCAmelCase__ : Optional[int]="[SEP]" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : List[str]="[SEP]" , UpperCAmelCase__ : List[str]="<pad>" , UpperCAmelCase__ : List[Any]="[CLS]" , UpperCAmelCase__ : List[Any]="[MASK]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
return len(self.sp_model )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Tuple:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self : str , UpperCAmelCase__ : List[str] ) -> str:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
if self.remove_space:
lowerCAmelCase = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('NFKD' , UpperCAmelCase__ )
lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str ) -> List[str]:
lowerCAmelCase = self.preprocess_text(UpperCAmelCase__ )
lowerCAmelCase = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
lowerCAmelCase = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Tuple ) -> Any:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Tuple ) -> str:
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
lowerCAmelCase = False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 363 |
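# The special-token layout the tokenizer methods above implement, as a hedged pure-
# Python sketch: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair,
# with token-type ids 0/1 per segment. The integer ids are placeholders, not ALBERT's.
CLS, SEP = 2, 3

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert build_inputs([10, 11]) == [2, 10, 11, 3]
assert token_type_ids([10, 11], [20]) == [0, 0, 0, 0, 1, 1]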
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Tuple ) -> Tuple:
# test for the above condition
self.test()
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = 0
lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
lowerCAmelCase = self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.update(UpperCAmelCase__ )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCAmelCase ( self : Dict ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Any ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ) -> Union[str, Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : str , UpperCAmelCase__ : List[int] ) -> Union[str, Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowerCAmelCase = token_ids
lowerCAmelCase = len(self.token_ids )
lowerCAmelCase = -1 # the index of the currently fulfilled step
lowerCAmelCase = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : int ) -> int:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int ) -> List[str]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCAmelCase = True
lowerCAmelCase = completed
else:
# failed to make progress.
lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase = False
lowerCAmelCase = 0
def __UpperCAmelCase ( self : Dict ) -> List[str]:
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[str]=False ) -> Optional[int]:
lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.fulfilled_idx
lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase_ :
def __init__( self : str , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : str=True ) -> str:
lowerCAmelCase = max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
lowerCAmelCase = {}
for token_ids in nested_token_ids:
lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
lowerCAmelCase = {}
lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
lowerCAmelCase = root
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = self.trie
for current_token in current_seq:
lowerCAmelCase = start[current_token]
lowerCAmelCase = list(start.keys() )
return next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Dict:
lowerCAmelCase = self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> List[Any]:
lowerCAmelCase = self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , UpperCAmelCase__ : List[List[int]] ) -> List[Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowerCAmelCase = DisjunctiveTrie(UpperCAmelCase__ )
lowerCAmelCase = nested_token_ids
lowerCAmelCase = self.trie.max_height
lowerCAmelCase = []
lowerCAmelCase = False
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Any:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int ) -> Tuple:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowerCAmelCase = True
else:
lowerCAmelCase = True
self.reset()
lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
lowerCAmelCase = completed
return stepped, completed, reset
def __UpperCAmelCase ( self : Optional[int] ) -> int:
lowerCAmelCase = False
lowerCAmelCase = []
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any]=False ) -> List[Any]:
lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.current_seq
lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase_ :
def __init__( self : Tuple , UpperCAmelCase__ : List[Constraint] ) -> str:
lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
lowerCAmelCase = max([c.seqlen for c in constraints] )
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = False
self.init_state()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = []
lowerCAmelCase = None
lowerCAmelCase = [constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def __UpperCAmelCase ( self : List[str] ) -> Any:
lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCAmelCase = constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[List[int]] ) -> Dict:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCAmelCase , lowerCAmelCase = self.add(UpperCAmelCase__ )
# the entire list of constraints is fulfilled
if self.completed:
break
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int ) -> Optional[Any]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowerCAmelCase , lowerCAmelCase = False, False
if self.completed:
lowerCAmelCase = True
lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any constraint
# in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
lowerCAmelCase = None
if not complete and stepped:
lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=True ) -> Optional[int]:
lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
# throughout this process, so they stay in their initialization state.
if stateful:
lowerCAmelCase = [
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
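# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of driving the trie-backed disjunctive constraint
# defined above. The import path assumes the `transformers` package layout,
# and the token ids are made up for illustration: the constraint is fulfilled
# once *either* branch of ids has been generated.
from transformers.generation.beam_constraints import DisjunctiveConstraint

constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])  # "1 2 3" OR "1 4"
for token_id in (1, 4):  # pretend the decoder emitted these tokens
    stepped, completed, reset = constraint.update(token_id)
    print(token_id, stepped, completed, reset)
# After token 4 the shorter branch [1, 4] reaches a trie leaf, so `completed`
# is True and the constraint counts as fulfilled.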
| 55 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCAmelCase = logging.get_logger(__name__)
# General docstring
UpperCAmelCase = '''MobileNetV1Config'''
# Base docstring
UpperCAmelCase = '''google/mobilenet_v1_1.0_224'''
UpperCAmelCase = [1, 1024, 7, 7]
# Image classification docstring
UpperCAmelCase = '''google/mobilenet_v1_1.0_224'''
UpperCAmelCase = '''tabby, tabby cat'''
UpperCAmelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __UpperCamelCase ( lowercase__ : Dict, lowercase__ : Optional[int], lowercase__ : Any=None ):
'''simple docstring'''
__lowercase ={}
if isinstance(lowercase__, lowercase__ ):
__lowercase =model.mobilenet_va
else:
__lowercase =model
__lowercase ='MobilenetV1/Conv2d_0/'
__lowercase =backbone.conv_stem.convolution.weight
__lowercase =backbone.conv_stem.normalization.bias
__lowercase =backbone.conv_stem.normalization.weight
__lowercase =backbone.conv_stem.normalization.running_mean
__lowercase =backbone.conv_stem.normalization.running_var
for i in range(13 ):
__lowercase =i + 1
__lowercase =i * 2
__lowercase =backbone.layer[pt_index]
__lowercase =F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
__lowercase =pointer.convolution.weight
__lowercase =pointer.normalization.bias
__lowercase =pointer.normalization.weight
__lowercase =pointer.normalization.running_mean
__lowercase =pointer.normalization.running_var
__lowercase =backbone.layer[pt_index + 1]
__lowercase =F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
__lowercase =pointer.convolution.weight
__lowercase =pointer.normalization.bias
__lowercase =pointer.normalization.weight
__lowercase =pointer.normalization.running_mean
__lowercase =pointer.normalization.running_var
if isinstance(lowercase__, lowercase__ ):
__lowercase ='MobilenetV1/Logits/Conv2d_1c_1x1/'
__lowercase =model.classifier.weight
__lowercase =model.classifier.bias
return tf_to_pt_map
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int, lowercase__ : Optional[Any] ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__lowercase =tf.train.list_variables(lowercase__ )
__lowercase ={}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
__lowercase =tf.train.load_variable(lowercase__, lowercase__ )
__lowercase =array
# Build TF to PyTorch weights loading map
__lowercase =_build_tf_to_pytorch_map(lowercase__, lowercase__, lowercase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
__lowercase =tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__lowercase =np.transpose(lowercase__, (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__lowercase =array.squeeze().transpose()
else:
__lowercase =np.transpose(lowercase__, (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
__lowercase =torch.from_numpy(lowercase__ )
tf_weights.pop(lowercase__, lowercase__ )
tf_weights.pop(name + '/RMSProp', lowercase__ )
tf_weights.pop(name + '/RMSProp_1', lowercase__ )
tf_weights.pop(name + '/ExponentialMovingAverage', lowercase__ )
logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def __UpperCamelCase ( lowercase__ : torch.Tensor, lowercase__ : nn.Convad ):
'''simple docstring'''
__lowercase , __lowercase =features.shape[-2:]
__lowercase , __lowercase =conv_layer.stride
__lowercase , __lowercase =conv_layer.kernel_size
if in_height % stride_height == 0:
__lowercase =max(kernel_height - stride_height, 0 )
else:
__lowercase =max(kernel_height - (in_height % stride_height), 0 )
if in_width % stride_width == 0:
__lowercase =max(kernel_width - stride_width, 0 )
else:
__lowercase =max(kernel_width - (in_width % stride_width), 0 )
__lowercase =pad_along_width // 2
__lowercase =pad_along_width - pad_left
__lowercase =pad_along_height // 2
__lowercase =pad_along_height - pad_top
__lowercase =(pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowercase__, lowercase__, 'constant', 0.0 )
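# --- Illustrative padding sketch (not part of the original file) ---
# A minimal check of the TensorFlow-style "SAME" padding arithmetic above,
# using made-up sizes: a 7x7 input, 3x3 kernel, stride 2. Since 7 % 2 != 0,
# pad_along_each_dim = max(3 - (7 % 2), 0) = 2, split as 1 before / 1 after,
# giving a 9x9 padded map and a 4x4 output, exactly matching ceil(7 / 2).
import torch
import torch.nn as nn

features = torch.zeros(1, 3, 7, 7)
conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=0)
padded = nn.functional.pad(features, (1, 1, 1, 1), "constant", 0.0)
print(padded.shape)        # torch.Size([1, 3, 9, 9])
print(conv(padded).shape)  # torch.Size([1, 8, 4, 4])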
class lowerCAmelCase ( nn.Module ):
def __init__( self : int , __lowercase : MobileNetVaConfig , __lowercase : int , __lowercase : int , __lowercase : int , __lowercase : Optional[int] = 1 , __lowercase : Optional[int] = 1 , __lowercase : bool = False , __lowercase : Optional[bool] = True , __lowercase : Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
__lowercase =config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__lowercase =0 if config.tf_padding else int((kernel_size - 1) / 2 )
__lowercase =nn.Convad(
in_channels=__lowercase , out_channels=__lowercase , kernel_size=__lowercase , stride=__lowercase , padding=__lowercase , groups=__lowercase , bias=__lowercase , padding_mode='zeros' , )
if use_normalization:
__lowercase =nn.BatchNormad(
num_features=__lowercase , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=__lowercase , track_running_stats=__lowercase , )
else:
__lowercase =None
if use_activation:
if isinstance(__lowercase , __lowercase ):
__lowercase =ACTaFN[use_activation]
elif isinstance(config.hidden_act , __lowercase ):
__lowercase =ACTaFN[config.hidden_act]
else:
__lowercase =config.hidden_act
else:
__lowercase =None
def snake_case ( self : Any , __lowercase : torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
__lowercase =apply_tf_padding(__lowercase , self.convolution )
__lowercase =self.convolution(__lowercase )
if self.normalization is not None:
__lowercase =self.normalization(__lowercase )
if self.activation is not None:
__lowercase =self.activation(__lowercase )
return features
class lowerCAmelCase ( A ):
lowerCAmelCase_ = MobileNetVaConfig
lowerCAmelCase_ = load_tf_weights_in_mobilenet_va
lowerCAmelCase_ = "mobilenet_v1"
lowerCAmelCase_ = "pixel_values"
lowerCAmelCase_ = False
def snake_case ( self : Union[str, Any] , __lowercase : Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(__lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowercase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCAmelCase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCAmelCase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , A , )
class lowerCAmelCase ( A ):
def __init__( self : Tuple , __lowercase : MobileNetVaConfig , __lowercase : bool = True ):
"""simple docstring"""
super().__init__(__lowercase )
__lowercase =config
__lowercase =32
__lowercase =max(int(depth * config.depth_multiplier ) , config.min_depth )
__lowercase =MobileNetVaConvLayer(
__lowercase , in_channels=config.num_channels , out_channels=__lowercase , kernel_size=3 , stride=2 , )
__lowercase =[1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__lowercase =nn.ModuleList()
for i in range(13 ):
__lowercase =out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__lowercase =max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__lowercase , in_channels=__lowercase , out_channels=__lowercase , kernel_size=3 , stride=strides[i] , groups=__lowercase , ) )
self.layer.append(
MobileNetVaConvLayer(
__lowercase , in_channels=__lowercase , out_channels=__lowercase , kernel_size=1 , ) )
__lowercase =nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def snake_case ( self : List[Any] , __lowercase : List[str] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case ( self : Optional[Any] , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , ):
"""simple docstring"""
__lowercase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase =return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__lowercase =self.conv_stem(__lowercase )
__lowercase =() if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__lowercase =layer_module(__lowercase )
if output_hidden_states:
__lowercase =all_hidden_states + (hidden_states,)
__lowercase =hidden_states
if self.pooler is not None:
__lowercase =torch.flatten(self.pooler(__lowercase ) , start_dim=1 )
else:
__lowercase =None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase , pooler_output=__lowercase , hidden_states=__lowercase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A , )
class lowerCAmelCase ( A ):
def __init__( self : Tuple , __lowercase : MobileNetVaConfig ):
"""simple docstring"""
super().__init__(__lowercase )
__lowercase =config.num_labels
__lowercase =MobileNetVaModel(__lowercase )
__lowercase =self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__lowercase =nn.Dropout(config.classifier_dropout_prob , inplace=__lowercase )
__lowercase =nn.Linear(__lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case ( self : Dict , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[bool] = None , ):
"""simple docstring"""
__lowercase =return_dict if return_dict is not None else self.config.use_return_dict
__lowercase =self.mobilenet_va(__lowercase , output_hidden_states=__lowercase , return_dict=__lowercase )
__lowercase =outputs.pooler_output if return_dict else outputs[1]
__lowercase =self.classifier(self.dropout(__lowercase ) )
__lowercase =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase ='single_label_classification'
else:
__lowercase ='multi_label_classification'
if self.config.problem_type == "regression":
__lowercase =MSELoss()
if self.num_labels == 1:
__lowercase =loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase =loss_fct(__lowercase , __lowercase )
elif self.config.problem_type == "single_label_classification":
__lowercase =CrossEntropyLoss()
__lowercase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase =BCEWithLogitsLoss()
__lowercase =loss_fct(__lowercase , __lowercase )
if not return_dict:
__lowercase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states , )
| 141 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class lowerCAmelCase ( A , A ):
lowerCAmelCase_ = "bit"
lowerCAmelCase_ = ["preactivation", "bottleneck"]
lowerCAmelCase_ = ["SAME", "VALID"]
def __init__( self : Union[str, Any] , __lowercase : Tuple=3 , __lowercase : Tuple=64 , __lowercase : List[str]=[256, 512, 1024, 2048] , __lowercase : int=[3, 4, 6, 3] , __lowercase : Optional[Any]="preactivation" , __lowercase : str="relu" , __lowercase : Tuple=None , __lowercase : int=32 , __lowercase : int=0.0 , __lowercase : Dict=False , __lowercase : List[Any]=32 , __lowercase : List[str]=1 , __lowercase : str=None , __lowercase : Any=None , **__lowercase : List[str] , ):
"""simple docstring"""
super().__init__(**__lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__lowercase =global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__lowercase =num_channels
__lowercase =embedding_size
__lowercase =hidden_sizes
__lowercase =depths
__lowercase =layer_type
__lowercase =hidden_act
__lowercase =global_padding
__lowercase =num_groups
__lowercase =drop_path_rate
__lowercase =embedding_dynamic_padding
__lowercase =output_stride
__lowercase =width_factor
__lowercase =['stem'] + [f'''stage{idx}''' for idx in range(1 , len(__lowercase ) + 1 )]
__lowercase , __lowercase =get_aligned_output_features_output_indices(
out_features=__lowercase , out_indices=__lowercase , stage_names=self.stage_names )
| 141 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = '▁'
lowercase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
lowercase_ = {
'google/pegasus-xsum': 5_1_2,
}
class A_ ( _a ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = PegasusTokenizer
__snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self: Any , a: List[Any]=None , a: str=None , a: str="<pad>" , a: Optional[int]="</s>" , a: Dict="<unk>" , a: Optional[int]="<mask_2>" , a: Union[str, Any]="<mask_1>" , a: Dict=None , a: Optional[int]=103 , **a: List[Any] , ):
__lowerCamelCase : Union[str, Any] = offset
if additional_special_tokens is not None:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError(
F'additional_special_tokens should be of type {type(snake_case_ )}, but is'
F' {type(snake_case_ )}' )
__lowerCamelCase : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(snake_case_ ) , self.offset - 1 )
]
if len(set(snake_case_ ) ) != len(snake_case_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
__lowerCamelCase : int = additional_special_tokens_extended
else:
__lowerCamelCase : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , pad_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , mask_token=snake_case_ , mask_token_sent=snake_case_ , offset=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
__lowerCamelCase : Dict = vocab_file
__lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def _snake_case ( self: Any , a: Dict ):
__lowerCamelCase : Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self: Optional[Any] , a: List , a: Optional[List] = None , a: bool = False ):
if already_has_special_tokens:
return self._special_token_mask(snake_case_ )
elif token_ids_a is None:
return self._special_token_mask(snake_case_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self: Dict , a: Optional[int] , a: Dict=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
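# --- Illustrative sketch (not part of the original file) ---
# What the method above produces (token ids are made up for illustration):
# a single sequence is closed with </s>, and a pair is simply concatenated
# before the final </s>:
#   [5, 6]        -> [5, 6, eos_token_id]
#   [5, 6] + [7]  -> [5, 6, 7, eos_token_id]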
def _snake_case ( self: Optional[Any] , a: str , a: Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase : Optional[int] = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
| 371 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[str] = np.inf
def set_batch_size(SCREAMING_SNAKE_CASE__ ) -> None:
nonlocal batch_size
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and feature.dtype == "binary":
__lowerCamelCase : List[str] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return None if batch_size is np.inf else batch_size
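# --- Illustrative sketch (not part of the original file) ---
# A minimal, hedged example of what the helper above computes: any Image,
# Audio, or binary feature in the schema caps the Parquet row-group size at
# the matching `config` constant; a schema with none of those returns None,
# letting the writer fall back to its default batch size. Feature names
# below are made up for illustration.
from datasets import Features, Image, Value

media_schema = Features({"img": Image(), "caption": Value("string")})
text_schema = Features({"caption": Value("string")})
# For `media_schema` the function returns
# config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS; for `text_schema`, None.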
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Tuple , a: NestedDataStructureLike[PathLike] , a: Optional[NamedSplit] = None , a: Optional[Features] = None , a: str = None , a: bool = False , a: bool = False , a: Optional[int] = None , **a: Optional[Any] , ):
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
__lowerCamelCase : List[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
__lowerCamelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
__lowerCamelCase : List[str] = Parquet(
cache_dir=a , data_files=a , features=a , hash=a , **a , )
def _snake_case ( self: List[str] ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase : str = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : int = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
__lowerCamelCase : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Dataset , a: Union[PathLike, BinaryIO] , a: Optional[int] = None , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = dataset
__lowerCamelCase : List[Any] = path_or_buf
__lowerCamelCase : List[str] = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase : List[Any] = parquet_writer_kwargs
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__lowerCamelCase : Optional[int] = self._write(file_obj=a , batch_size=a , **self.parquet_writer_kwargs )
else:
__lowerCamelCase : Any = self._write(file_obj=self.path_or_buf , batch_size=a , **self.parquet_writer_kwargs )
return written
def _snake_case ( self: Optional[int] , a: BinaryIO , a: int , **a: str ):
__lowerCamelCase : Dict = 0
__lowerCamelCase : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , a )
__lowerCamelCase : str = self.dataset.features.arrow_schema
__lowerCamelCase : Any = pq.ParquetWriter(a , schema=a , **a )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__lowerCamelCase : Any = query_table(
table=self.dataset._data , key=slice(a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(a )
written += batch.nbytes
writer.close()
return written
| 194 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Optional[int] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class _UpperCAmelCase ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = "open-llama"
def __init__( self : Optional[int] , A : Optional[Any]=10_00_00 , A : List[Any]=40_96 , A : Any=1_10_08 , A : int=32 , A : Dict=32 , A : Optional[Any]="silu" , A : Union[str, Any]=20_48 , A : Optional[Any]=0.02 , A : Union[str, Any]=1e-6 , A : Optional[Any]=True , A : str=0 , A : Tuple=1 , A : List[Any]=2 , A : Tuple=False , A : List[Any]=True , A : Union[str, Any]=0.1 , A : Tuple=0.1 , A : str=True , A : List[str]=True , A : str=None , **A : int , ) -> Dict:
lowercase_ : Tuple = vocab_size
lowercase_ : Optional[int] = max_position_embeddings
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[int] = intermediate_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = initializer_range
lowercase_ : Union[str, Any] = rms_norm_eps
lowercase_ : Any = use_cache
lowercase_ : List[Any] = kwargs.pop(
'''use_memorry_efficient_attention''' , UpperCamelCase__ )
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : int = attention_dropout_prob
lowercase_ : Optional[Any] = use_stable_embedding
lowercase_ : Optional[Any] = shared_input_output_embedding
lowercase_ : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ , )
def A ( self : Any ) -> List[str]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'''got {self.rope_scaling}''' )
lowercase_ : Any = self.rope_scaling.get('''type''' , UpperCamelCase__ )
lowercase_ : str = self.rope_scaling.get('''factor''' , UpperCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
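# --- Illustrative sketch (not part of the original file) ---
# A config value that passes the validation above must be a two-field dict
# with a supported type and a float factor > 1 (values made up):
rope_scaling = {"type": "linear", "factor": 2.0}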
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
lowerCAmelCase_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(UpperCamelCase__ )
from datasets import load_dataset
lowerCAmelCase_ = load_dataset('''nielsr/rvlcdip-demo''' )
lowerCAmelCase_ = dataset['''train'''][0]['''image'''].convert('''RGB''' )
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
lowerCAmelCase_ = outputs.logits
lowerCAmelCase_ = torch.Size((1, 16) )
self.assertEqual(logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347], device=UpperCamelCase__, dtype=torch.float, )
self.assertTrue(torch.allclose(logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
| 278 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=99 , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=9 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=8 , UpperCamelCase_=0.1 , UpperCamelCase_=0.002 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=None , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = parent
UpperCamelCase__ :Optional[Any] = batch_size
UpperCamelCase__ :str = encoder_seq_length
UpperCamelCase__ :Optional[Any] = decoder_seq_length
# For common tests
UpperCamelCase__ :List[str] = self.decoder_seq_length
UpperCamelCase__ :Optional[int] = is_training
UpperCamelCase__ :str = use_attention_mask
UpperCamelCase__ :List[Any] = use_labels
UpperCamelCase__ :Optional[Any] = vocab_size
UpperCamelCase__ :List[Any] = hidden_size
UpperCamelCase__ :Dict = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :Optional[int] = d_ff
UpperCamelCase__ :Optional[Any] = relative_attention_num_buckets
UpperCamelCase__ :List[str] = dropout_rate
UpperCamelCase__ :Union[str, Any] = initializer_factor
UpperCamelCase__ :Any = eos_token_id
UpperCamelCase__ :str = pad_token_id
UpperCamelCase__ :str = decoder_start_token_id
UpperCamelCase__ :int = None
UpperCamelCase__ :str = decoder_layers
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCamelCase__ :Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase__ :Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase__ :Dict = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase_ )
if decoder_head_mask is None:
UpperCamelCase__ :Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ )
if cross_attn_head_mask is None:
UpperCamelCase__ :Any = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ :Tuple = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ :Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ :int = self.get_config()
UpperCamelCase__ :int = config.num_attention_heads
UpperCamelCase__ :Union[str, Any] = self.prepare_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, input_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :int = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Dict = UMTaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(
input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , )
UpperCamelCase__ :Tuple = model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
UpperCamelCase__ :Tuple = result.last_hidden_state
UpperCamelCase__ :Union[str, Any] = result.past_key_values
UpperCamelCase__ :List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :str = UMTaModel(config=UpperCamelCase_ ).get_decoder().to(UpperCamelCase_ ).eval()
# first forward pass
UpperCamelCase__ :Any = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
UpperCamelCase__ :int = model(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
UpperCamelCase__ , UpperCamelCase__ :Dict = outputs.to_tuple()
# create a hypothetical next token and extend it to next_input_ids
UpperCamelCase__ :List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the sampled tokens to input_ids to build next_input_ids
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :List[str] = model(UpperCamelCase_ )['''last_hidden_state''']
UpperCamelCase__ :Any = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['''last_hidden_state''']
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase__ :List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = UMTaModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).half().eval()
UpperCamelCase__ :Tuple = model(**UpperCamelCase_ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCamelCase_ ).any().item() )
@require_torch
class lowercase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_a = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_a = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = True
_a = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_a = [0.8, 0.9]
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ :Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCamelCase_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ :str = config_and_inputs[0]
UpperCamelCase__ :str = UMTaForConditionalGeneration(UpperCamelCase_ ).eval()
model.to(UpperCamelCase_ )
UpperCamelCase__ :str = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase_ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ ),
}
for attn_name, (name, mask) in zip(UpperCamelCase_ , head_masking.items() ):
UpperCamelCase__ :List[str] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCamelCase__ :Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase_ )
UpperCamelCase__ :int = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , **UpperCamelCase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCamelCase__ :Optional[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCamelCase_ ).to(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCamelCase_ , legacy=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
UpperCamelCase__ :Union[str, Any] = tokenizer(UpperCamelCase_ , return_tensors='''pt''' , padding=UpperCamelCase_ ).input_ids
# fmt: off
UpperCamelCase__ :Optional[Any] = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = model.generate(input_ids.to(UpperCamelCase_ ) )
UpperCamelCase__ :str = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
UpperCamelCase__ :List[str] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 219 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt'''}
__snake_case = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
__snake_case = {
'''openbmb/cpm-ant-10b''': 1024,
}
def a ( __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[str] = collections.OrderedDict()
with open(__a , '''r''' , encoding='''utf-8''' ) as reader:
UpperCamelCase__ :Dict = reader.readlines()
for index, token in enumerate(__a ):
UpperCamelCase__ :str = token.rstrip('''\n''' )
UpperCamelCase__ :Optional[int] = index
return vocab
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<unk>" , UpperCamelCase_=200 ):
'''simple docstring'''
UpperCamelCase__ :Tuple = vocab
UpperCamelCase__ :List[str] = unk_token
UpperCamelCase__ :Tuple = max_input_chars_per_word
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = list(UpperCamelCase_ )
if len(UpperCamelCase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :str = []
while start < len(UpperCamelCase_ ):
UpperCamelCase__ :int = len(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = None
while start < end:
UpperCamelCase__ :int = ''''''.join(chars[start:end] )
if substr in self.vocab:
UpperCamelCase__ :List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase_ )
UpperCamelCase__ :Any = end
return sub_tokens
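# --- Illustrative tokenization sketch (not part of the original file) ---
# A minimal, self-contained trace of the greedy longest-match loop above,
# with a toy vocabulary (the real vocab comes from vocab.txt). At each step
# the longest substring found in the vocab is emitted, then scanning resumes
# right after it; characters matching nothing become the <unk> token.
toy_vocab = {"un": 0, "believ": 1, "able": 2, "unbeliev": 3}

def greedy_tokenize(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:  # shrink the window until a vocab hit
            if word[start:end] in vocab:
                match = word[start:end]
                break
            end -= 1
        if match is None:
            tokens.append(unk)
            start += 1
        else:
            tokens.append(match)
            start = end
    return tokens

print(greedy_tokenize("unbelievable", toy_vocab))  # ['unbeliev', 'able']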
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
_a = False
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<d>" , UpperCamelCase_="</d>" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<unk>" , UpperCamelCase_="</n>" , UpperCamelCase_="</_>" , UpperCamelCase_="left" , **UpperCamelCase_ , ):
'''simple docstring'''
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=UpperCamelCase_ , eod_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , line_token=UpperCamelCase_ , space_token=UpperCamelCase_ , padding_side=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = bod_token
UpperCamelCase__ :Dict = eod_token
UpperCamelCase__ :Optional[int] = load_vocab(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.encoder[space_token]
UpperCamelCase__ :List[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase__ :Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
UpperCamelCase__ :Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ :List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = []
for x in jieba.cut(UpperCamelCase_ , cut_all=UpperCamelCase_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase_ ) )
return output_tokens
def lowerCAmelCase__ ( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [i for i in token_ids if i >= 0]
UpperCamelCase__ :Optional[int] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return token in self.encoder
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if os.path.isdir(UpperCamelCase_ ):
UpperCamelCase__ :int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase__ :str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase__ :Any = 0
if " " in self.encoder:
UpperCamelCase__ :Dict = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase__ :List[str] = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase__ :List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase__ :Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ ))
return [1] + ([0] * len(UpperCamelCase_ ))
| 219 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=3 , snake_case__=224 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase : List[str] = size if size is not None else {"height": 18, "width": 18}
lowerCAmelCase : Any = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : Union[str, Any] = min_resolution
lowerCAmelCase : Dict = max_resolution
lowerCAmelCase : List[str] = do_resize
lowerCAmelCase : Optional[Any] = size
lowerCAmelCase : Optional[int] = do_normalize
lowerCAmelCase : Optional[Any] = image_mean
lowerCAmelCase : Any = image_std
def lowercase__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : str =ViTImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCAmelCase : Any = image_processor(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase : int = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processor(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Any = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
lowerCAmelCase : str = image_processor(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 108 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : int =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Dict = VideoClassificationPipeline(model=snake_case__ , image_processor=snake_case__ , top_k=2 )
lowerCAmelCase : Any = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
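    # The pipeline accepts local file paths as well as remote URLs; decord (required
    # via the class decorator above) handles video decoding and frame sampling.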
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
for example in examples:
lowerCAmelCase : str = video_classifier(snake_case__ )
self.assertEqual(
snake_case__ , [
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
{"score": ANY(snake_case__ ), "label": ANY(snake_case__ )},
] , )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowerCAmelCase : str = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
lowerCAmelCase : int = pipeline(
"video-classification" , model=snake_case__ , feature_extractor=snake_case__ , frame_sampling_rate=4 )
lowerCAmelCase : Optional[int] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCAmelCase : Union[str, Any] = video_classifier(snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
lowerCAmelCase : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
pass
| 108 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
_SCREAMING_SNAKE_CASE : Optional[int] = """path-to-your-trained-model"""
_SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
_SCREAMING_SNAKE_CASE : Dict = """A photo of sks dog in a bucket"""
_SCREAMING_SNAKE_CASE : Any = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 157 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_ = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Tuple , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
UpperCamelCase__ : List[str] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
UpperCamelCase__ : List[str] =kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : List[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.image_processor
def __call__( self : Optional[int] , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCamelCase__ : Optional[int] =self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
UpperCamelCase__ : str =self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
UpperCamelCase__ : Optional[int] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
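    # Usage sketch: processor(text=[...], images=image, return_tensors="pt") merges the
    # tokenizer output (input_ids, attention_mask) with the image processor's
    # pixel_values into one encoding, matching what ChineseCLIPModel.forward expects.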
def _lowerCAmelCase ( self : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowerCAmelCase ( self : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[str] =self.tokenizer.model_input_names
UpperCamelCase__ : List[str] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self : Any ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
| 157 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
a : str = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a : Any = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
a : Optional[Any] = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a : Tuple = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
a : Dict = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
a : Any = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def __lowerCamelCase ( _lowercase ) -> Tuple:
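    # Splits a camel-cased name into words, e.g. "BertForMaskedLM" -> ["Bert", "For", "Masked", "LM"].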
UpperCAmelCase : int = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _lowercase )
return [m.group(0 ) for m in matches]
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase : Tuple = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase : Union[str, Any] = collections.defaultdict(_lowercase )
UpperCAmelCase : Optional[Any] = collections.defaultdict(_lowercase )
UpperCAmelCase : Optional[Any] = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase ):
UpperCAmelCase : Tuple = None
if _re_tf_models.match(_lowercase ) is not None:
UpperCAmelCase : Tuple = tf_models
UpperCAmelCase : Dict = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
UpperCAmelCase : Optional[Any] = flax_models
UpperCAmelCase : str = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
UpperCAmelCase : Any = pt_models
UpperCAmelCase : Optional[Any] = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase : List[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
UpperCAmelCase : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase : Tuple = list(_lowercase )
all_models.sort()
UpperCAmelCase : Optional[int] = {"""model_type""": all_models}
UpperCAmelCase : Optional[Any] = [pt_models[t] for t in all_models]
UpperCAmelCase : List[str] = [tf_models[t] for t in all_models]
UpperCAmelCase : List[Any] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to attach the right processing class to each model type.
UpperCAmelCase : Optional[int] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase : Tuple = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase : Optional[Any] = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase : Any = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase : Optional[int] = """AutoTokenizer"""
UpperCAmelCase : List[Any] = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def __lowerCamelCase ( _lowercase ) -> str:
UpperCAmelCase : Any = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase : str = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
UpperCAmelCase : Optional[Any] = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase , _lowercase , _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase , _lowercase ):
continue
# First extract all model_names
UpperCAmelCase : Tuple = []
for name in getattr(_lowercase , _lowercase ).values():
if isinstance(_lowercase , _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : Tuple = get_frameworks_table()
UpperCAmelCase : str = Dataset.from_pandas(_lowercase )
UpperCAmelCase : Optional[Any] = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=_lowercase )
UpperCAmelCase : Dict = Dataset.from_json(_lowercase )
UpperCAmelCase : Optional[int] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
UpperCAmelCase : Dict = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase : Dict = sorted(table.keys() )
UpperCAmelCase : Optional[int] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
UpperCAmelCase : int = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase , """pipeline_tags.json""" ) )
if commit_sha is not None:
UpperCAmelCase : Union[str, Any] = (
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
UpperCAmelCase : Union[str, Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=_lowercase , repo_type="""dataset""" , token=_lowercase , commit_message=_lowercase , )
def __lowerCamelCase ( ) -> Any:
UpperCAmelCase : Optional[int] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase : Tuple = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase , (list, tuple) ):
UpperCAmelCase : List[str] = model[0]
UpperCAmelCase : List[str] = model.__name__
if model not in in_table.values():
missing.append(_lowercase )
if len(_lowercase ) > 0:
UpperCAmelCase : Any = """, """.join(_lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
a : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 265 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
return getitem, k
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
return setitem, k, v
def __lowerCamelCase ( _lowercase ) -> int:
return delitem, k
def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]:
try:
return fun(_lowercase , *_lowercase ), None
except Exception as e:
return None, e
a : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a : List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a : int = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a : List[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : List[str] = HashMap(initial_block_size=4 )
UpperCAmelCase : Dict = {}
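    # Differential test: replay the same operation sequence against our HashMap and a
    # builtin dict, requiring identical results, string forms, key sets and lengths.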
for _, (fun, *args) in enumerate(_lowercase ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase )
UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase )
assert my_res == py_res
assert str(_lowercase ) == str(_lowercase )
assert set(_lowercase ) == set(_lowercase )
assert len(_lowercase ) == len(_lowercase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowercase ) -> bool:
return not name.startswith("""_""" )
UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )}
UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )}
assert dict_public_names > hash_public_names
| 265 | 1 |
def a__ ( UpperCAmelCase : str ) -> List[str]:
stooge(UpperCAmelCase , 0 , len(UpperCAmelCase ) - 1 )
return arr
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCAmelCase , UpperCAmelCase : List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCAmelCase : Tuple = (int)((h - i + 1) / 3 )
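        # t is one third of the slice length; sorting the first 2/3, the last 2/3, and
        # the first 2/3 again gives stooge sort its O(n^(log 3 / log 1.5)) ≈ O(n^2.71) runtime.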
# Recursively sort first 2/3 elements
stooge(UpperCAmelCase , UpperCAmelCase , (h - t) )
# Recursively sort last 2/3 elements
stooge(UpperCAmelCase , i + t , (UpperCAmelCase) )
# Recursively sort first 2/3 elements
stooge(UpperCAmelCase , UpperCAmelCase , (h - t) )
if __name__ == "__main__":
_lowerCamelCase : Tuple = input("Enter numbers separated by a comma:\n").strip()
_lowerCamelCase : List[str] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 99 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : Any, __A : Optional[Any], __A : Optional[Any]=3, __A : Union[str, Any]=3_2, __A : Optional[int]=3, __A : str=1_0, __A : Union[str, Any]=[8, 1_6, 3_2, 6_4], __A : List[str]=[1, 1, 2, 1], __A : Dict=True, __A : List[Any]=True, __A : int="relu", __A : Optional[Any]=3, __A : Any=None, __A : Any=["stage2", "stage3", "stage4"], __A : Optional[int]=[2, 3, 4], __A : Any=1, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : Optional[int] = image_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : Optional[Any] = embeddings_size
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : Any = depths
UpperCAmelCase : int = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Any = num_labels
UpperCAmelCase : List[Any] = scope
UpperCAmelCase : int = len(__A )
UpperCAmelCase : Union[str, Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Tuple = num_groups
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Tuple ):
return BitConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
def __magic_name__ ( self : int, __A : str, __A : List[Any], __A : Any ):
UpperCAmelCase : Optional[int] = BitModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
def __magic_name__ ( self : List[Any], __A : Any, __A : Union[str, Any], __A : Dict ):
UpperCAmelCase : Optional[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = BitForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int], __A : Any, __A : List[str], __A : str ):
UpperCAmelCase : Any = BitBackbone(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : List[str] = None
UpperCAmelCase : Optional[Any] = BitBackbone(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = BitModelTester(self )
UpperCAmelCase : Any = ConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Union[str, Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __magic_name__ ( self : List[Any] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __magic_name__ ( self : Dict ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[str] = [*signature.parameters.keys()]
UpperCAmelCase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(config=__A )
for name, module in model.named_modules():
if isinstance(__A, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
def __magic_name__ ( self : Dict ):
def check_hidden_states_output(__A : List[Any], __A : Optional[int], __A : int ):
UpperCAmelCase : int = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Any = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Any = self.model_tester.num_stages
self.assertEqual(len(__A ), expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : int = layer_type
UpperCAmelCase : List[Any] = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Dict = True
check_hidden_states_output(__A, __A, __A )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __magic_name__ ( self : List[str] ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __magic_name__ ( self : List[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[str] = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
UpperCAmelCase : str = self.default_image_processor
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**__A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : int = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase = BitConfig
UpperCamelCase = False
def __magic_name__ ( self : int ):
UpperCAmelCase : int = BitModelTester(self )
| 99 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ) -> int:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE_ = self.decoder_seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_attention_mask
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = decoder_start_token_id
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = decoder_seq_length
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _UpperCamelCase ( self , _A , _A , _A , _A , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A ).to(_A ).eval()
SCREAMING_SNAKE_CASE_ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A )
self.parent.assertTrue(len(_A ) == len(_A ) )
self.parent.assertTrue(len(_A ) == len(_A ) + 1 )
SCREAMING_SNAKE_CASE_ = outputs["""past_key_values"""]
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to next_input_ids
SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ = model(_A )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A )["""last_hidden_state"""]
# select random slice
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_A , _A , atol=1E-3 )
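        # With a correct cache, feeding just the new token plus past_key_values must
        # reproduce the full-sequence forward pass at that position.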
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase_ =(TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ ={"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase_ =True
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A )
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Optional[Any]:
pass
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_A )
def _UpperCamelCase ( self ) -> Any:
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _UpperCamelCase ( self ) -> Optional[int]:
pass
| 299 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__UpperCAmelCase : Any = "src/diffusers"
# Matches is_xxx_available()
__UpperCAmelCase : List[str] = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__UpperCAmelCase : Dict = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__UpperCAmelCase : int = "\n{0} = None\n"
__UpperCAmelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
__UpperCAmelCase : Tuple = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def A__ ( SCREAMING_SNAKE_CASE__) -> Any:
__snake_case: int = _re_backend.findall(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE__)
def A__ ( ) -> Optional[int]:
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """__init__.py""") , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Optional[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
__snake_case: Tuple = 0
__snake_case: Any = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE__):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__snake_case: List[Any] = find_backend(lines[line_index])
if backend is not None:
while not lines[line_index].startswith("""else:"""):
line_index += 1
line_index += 1
__snake_case: Any = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE__) and len(lines[line_index]) > 1:
__snake_case: List[Any] = lines[line_index]
__snake_case: str = _re_single_line_import.search(SCREAMING_SNAKE_CASE__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE__)
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
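# e.g. create_dummy_object("UNet2DModel", '["torch"]') renders the DUMMY_CLASS template:
# a DummyObject subclass whose __init__ / from_config / from_pretrained all raise via
# requires_backends until the torch backend is installed.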
def A__ ( SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
if backend_specific_objects is None:
__snake_case: Union[str, Any] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__snake_case: Union[str, Any] = {}
for backend, objects in backend_specific_objects.items():
__snake_case: List[Any] = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""")) + """]"""
__snake_case: Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for o in objects])
__snake_case: List[str] = dummy_file
return dummy_files
def A__ ( SCREAMING_SNAKE_CASE__=False) -> int:
__snake_case: List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__snake_case: Dict = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
__snake_case: Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , """utils""")
__snake_case: List[Any] = {
backend: os.path.join(SCREAMING_SNAKE_CASE__ , F'''dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py''')
for backend in dummy_files.keys()
}
__snake_case: int = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Any = f.read()
else:
__snake_case: Tuple = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py as the main '''
"""__init__ has new objects.""")
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.write(dummy_files[backend])
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py. Run `make fix-copies` '''
"""to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : List[Any] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 111 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __lowerCamelCase ( __a :Union[str, Any] , __a :Optional[Any] , __a :Tuple , __a :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A__ = multiprocessing.Manager()
A__ = manager.list()
A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Dict ) -> List[Any]:
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
A__ = shutil.rmtree
A__ = os.rmdir
A__ = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
A__ = {}
with swallow_io():
with time_limit(__a ):
exec(__a , __a )
result.append("""passed""" )
except TimeoutException:
result.append("""timed out""" )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
A__ = rmtree
A__ = rmdir
A__ = chdir
@contextlib.contextmanager
def __lowerCamelCase ( __a :Tuple ) -> Dict:
"""simple docstring"""
def signal_handler(__a :Optional[int] , __a :List[str] ):
raise TimeoutException("""Timed out!""" )
signal.setitimer(signal.ITIMER_REAL , __a )
signal.signal(signal.SIGALRM , __a )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
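# Note: SIGALRM-based timeouts only work in the main thread of the main interpreter;
# setitimer(ITIMER_REAL, 0) in the finally block disarms the timer so it cannot fire
# after the guarded block exits.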
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ = WriteOnlyStringIO()
with contextlib.redirect_stdout(__a ):
with contextlib.redirect_stderr(__a ):
with redirect_stdin(__a ):
yield
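# swallow_io silences the program under test: stdout and stderr are captured into a
# write-only buffer and stdin is replaced by a stream that raises OSError on reads.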
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(__a ):
yield dirname
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class A (io.StringIO ):
'''simple docstring'''
def a_ ( self : Optional[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def a_ ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def a_ ( self : Dict , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Any:
"""simple docstring"""
raise OSError
def a_ ( self : Union[str, Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Tuple ) -> Any:
"""simple docstring"""
return False
class A (contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
__lowerCamelCase : Optional[int] = '''stdin'''
@contextlib.contextmanager
def __lowerCamelCase ( __a :str ) -> Optional[int]:
"""simple docstring"""
if root == ".":
yield
return
A__ = os.getcwd()
os.chdir(__a )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__a )
def __lowerCamelCase ( __a :Tuple=None ) -> int:
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
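    # In OpenAI's original human-eval code, the assignments below null out destructive
    # entry points (builtins.exit/quit, os.kill, os.system, os.remove, shutil.rmtree,
    # subprocess.Popen, ...) so untrusted generated code cannot damage the host or
    # exhaust its resources.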
A__ = None
A__ = None
import os
A__ = """1"""
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
import shutil
A__ = None
A__ = None
A__ = None
import subprocess
A__ = None # type: ignore
A__ = None
import sys
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
| 276 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Optional[int] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = '''roberta'''
def __init__( self : Any , __lowerCAmelCase : Tuple=5_02_65 , __lowerCAmelCase : Optional[int]=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : Optional[Any]=30_72 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : Dict=1e-12 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Dict="absolute" , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : str , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
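        # The dynamic axes tell the ONNX exporter which input dimensions may vary at
        # runtime: batch size and sequence length (plus the choice axis for multiple-choice).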
| 276 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : Optional[int] =logging.get_logger(__name__)
A__ : Optional[int] ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
A__ : List[Any] ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
A__ : Optional[int] ={"""facebook/blenderbot_small-90M""": 5_12}
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = set()
_lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase = char
_lowerCAmelCase = set(__snake_case )
return pairs
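# e.g. get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}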
class UpperCAmelCase ( lowerCamelCase__ ):
_lowercase: Optional[int] = VOCAB_FILES_NAMES
_lowercase: Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Dict = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , __snake_case : List[Any] , __snake_case : int , __snake_case : Tuple="__start__" , __snake_case : int="__end__" , __snake_case : Optional[Any]="__unk__" , __snake_case : Optional[int]="__null__" , **__snake_case : Tuple , ) -> List[Any]:
super().__init__(unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , **__snake_case )
with open(__snake_case , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase = json.load(__snake_case )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__snake_case , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_lowerCAmelCase = {}
@property
def lowercase__ ( self : int ) -> List[Any]:
return len(self.encoder )
def lowercase__ ( self : Optional[int] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Union[str, Any] , __snake_case : List[str] ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
_lowerCAmelCase = re.sub("""([.,!?()])""" , R""" \1""" , __snake_case )
_lowerCAmelCase = re.sub("""(\')""" , R""" \1 """ , __snake_case )
_lowerCAmelCase = re.sub(R"""\s{2,}""" , """ """ , __snake_case )
if "\n" in token:
_lowerCAmelCase = token.replace("""\n""" , """ __newln__""" )
_lowerCAmelCase = token.split(""" """ )
_lowerCAmelCase = []
for token in tokens:
if not len(__snake_case ):
continue
_lowerCAmelCase = token.lower()
_lowerCAmelCase = tuple(__snake_case )
_lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_lowerCAmelCase = get_pairs(__snake_case )
if not pairs:
words.append(__snake_case )
continue
while True:
_lowerCAmelCase = min(__snake_case , key=lambda __snake_case : self.bpe_ranks.get(__snake_case , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase = bigram
_lowerCAmelCase = []
_lowerCAmelCase = 0
while i < len(__snake_case ):
try:
_lowerCAmelCase = word.index(__snake_case , __snake_case )
new_word.extend(word[i:j] )
_lowerCAmelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase = tuple(__snake_case )
_lowerCAmelCase = new_word
if len(__snake_case ) == 1:
break
else:
_lowerCAmelCase = get_pairs(__snake_case )
_lowerCAmelCase = """@@ """.join(__snake_case )
_lowerCAmelCase = word[:-4]
_lowerCAmelCase = word
words.append(__snake_case )
return " ".join(__snake_case )
def lowercase__ ( self : Any , __snake_case : int ) -> Union[str, Any]:
_lowerCAmelCase = []
_lowerCAmelCase = re.findall(R"""\S+\n?""" , __snake_case )
for token in words:
split_tokens.extend(list(self.bpe(__snake_case ).split(""" """ ) ) )
return split_tokens
def lowercase__ ( self : Dict , __snake_case : Dict ) -> List[str]:
_lowerCAmelCase = token.lower()
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Union[str, Any] , __snake_case : List[str] ) -> int:
return self.decoder.get(__snake_case , self.unk_token )
def lowercase__ ( self : Any , __snake_case : str ) -> Union[str, Any]:
_lowerCAmelCase = """ """.join(__snake_case ).replace("""@@ """ , """""" ).strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> tuple:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 70 |
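# Aside: the tokenizer above runs the classic greedy BPE merge loop --
# repeatedly pick the adjacent symbol pair with the lowest learned rank and
# fuse it. A minimal self-contained sketch; the rank dict and the "</w>"
# end-of-word marker mirror the snippet above and are illustrative only.
def bpe_merge(word, bpe_ranks):
    def get_pairs(w):
        return {(w[i], w[i + 1]) for i in range(len(w) - 1)}

    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no learned merge applies any more
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)  # fuse the winning pair
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    return word

# bpe_merge(("l", "o", "w", "</w>"), {("l", "o"): 0, ("lo", "w"): 1})
# -> ('low', '</w>')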
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 | 0 |
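# Aside: `_LazyModule` defers the heavy framework imports above until an
# attribute is actually touched. The same effect can be sketched with
# PEP 562's module-level __getattr__ (simplified; the real _LazyModule also
# replaces the module object in sys.modules, as done above):
import importlib

_lazy_import_structure = {"tokenization_funnel": ["FunnelTokenizer"]}
_name_to_module = {
    name: module for module, names in _lazy_import_structure.items() for name in names
}

def __getattr__(name):  # called only for attributes not found normally
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")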
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
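# Aside: `list_field` exists because dataclasses reject mutable defaults
# (`tags: list = []` raises ValueError); `default_factory` builds a fresh
# list per instance instead. A tiny illustration (demo class only):
@dataclass
class _MutableDefaultDemo:
    tags: list = field(default_factory=lambda: ["a", "b"])

assert _MutableDefaultDemo().tags == ["a", "b"]
assert _MutableDefaultDemo().tags is not _MutableDefaultDemo().tags  # fresh list each time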
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowercase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ )
lowercase_ : int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : List[Any] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Tuple = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase_ : Any = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
@dataclass
class __magic_name__ :
UpperCamelCase__ = '''toto'''
lowercase_ : Optional[Any] = HfArgumentParser(lowercase_ )
lowercase_ : Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = HfArgumentParser(lowercase_ )
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Dict = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase_ : List[str] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowercase_ , type=lowercase_ )
expected.add_argument("""--bar""" , default=lowercase_ , type=lowercase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowercase_ , type=lowercase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase_ )
lowercase_ : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase_ : Any = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase_ : Dict = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
lowercase_ : int = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = HfArgumentParser(lowercase_ )
lowercase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase_ , required=lowercase_ )
expected.add_argument("""--required_str""" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = HfArgumentParser(lowercase_ )
lowercase_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase_ , )
expected.add_argument("""--opt""" , type=lowercase_ , default=lowercase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = HfArgumentParser(lowercase_ )
lowercase_ : List[str] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowercase_ : str = parser.parse_dict(lowercase_ )[0]
lowercase_ : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : str = HfArgumentParser(lowercase_ )
lowercase_ : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = HfArgumentParser(lowercase_ )
lowercase_ : Optional[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = os.path.join(lowercase_ , """temp_json""" )
os.mkdir(lowercase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
lowercase_ : Union[str, Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Dict = HfArgumentParser(lowercase_ )
lowercase_ : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = os.path.join(lowercase_ , """temp_yaml""" )
os.mkdir(lowercase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowercase_ , lowercase_ )
lowercase_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
lowercase_ : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
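# Aside: end-to-end, the parser under test is typically driven like this
# (the CLI values are invented for illustration; `BasicExample` is the
# dataclass defined above):
if __name__ == "__main__":
    demo_parser = HfArgumentParser(BasicExample)
    (demo_example,) = demo_parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"], look_for_args_file=False
    )
    print(demo_example)  # BasicExample(foo=1, bar=0.5, baz='quux', flag=True)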
| 356 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b  # compare contents with the names blanked out

    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves an optimized copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
| 21 | 0 |
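# Aside: the pairwise `_is_equal_tensor_proto` scan above is O(n^2) in the
# number of initializers. The same duplicate detection can be done in one
# pass by hashing each initializer's serialized bytes; a hedged sketch
# (assumes a loaded onnx.ModelProto; the helper name is illustrative):
import onnx

def find_duplicate_initializers(model):
    seen = {}        # serialized bytes -> canonical initializer name
    duplicates = []  # (duplicate_name, canonical_name) pairs
    for init in model.graph.initializer:
        name = init.name
        init.name = ""                 # compare contents, not names
        key = init.SerializeToString()
        init.name = name
        if key in seen:
            duplicates.append((name, seen[key]))
        else:
            seen[key] = name
    return duplicates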
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE: the target-version digits were garbled upstream ("PYaa");
        # PY35 is assumed here, matching the transformers test this was adapted from.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 184 |
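# Aside: the essence of the "# Copied from ... with A->B" convention tested
# above is: apply the declared renames to the reference code and demand an
# exact match. A toy version (not the real check_copies implementation):
import re

def _is_copy_consistent_demo(reference, copy, renames):
    expected = reference
    for old, new in renames.items():
        expected = re.sub(old, new, expected)
    return expected == copy

assert _is_copy_consistent_demo(
    "class DDPMSchedulerOutput:\n    pass\n",
    "class TestSchedulerOutput:\n    pass\n",
    {"DDPM": "Test"},
)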
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
def SCREAMING_SNAKE_CASE__ ( self , snake_case = "auto" ):
if slice_size == "auto":
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self , snake_case , snake_case=1_6000 , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 195 | 0 |
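# Aside: the guidance step inside the denoising loop above is the whole trick
# of classifier-free guidance -- one UNet pass over a stacked [uncond | text]
# batch, then extrapolate away from the unconditional prediction. In
# isolation (shapes and scale are illustrative):
import torch

_noise_pred = torch.randn(2, 4, 64, 64)  # [uncond | text] stacked on the batch dim
_scale = 7.5
_uncond, _text = _noise_pred.chunk(2)
_guided = _uncond + _scale * (_text - _uncond)  # _scale == 1.0 recovers the plain conditional output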
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 58 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
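def _drop_path_preserves_mean_demo():
    # Aside: the `input.div(keep_prob)` rescaling above keeps the expected
    # value of the activations unchanged while whole residual paths are
    # randomly dropped. A quick numerical check of that property:
    x = torch.ones(10_000, 8)
    keep_prob = 0.7
    mask = (keep_prob + torch.rand(x.shape[0], 1)).floor()  # Bernoulli(keep_prob) per row
    return (x.div(keep_prob) * mask).mean()  # ~1.0 despite ~30% of rows being zeroed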
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings with a strided convolution.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group normalization with a single group; input shape [batch, channels, *].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # subtracting the input makes this a pure "token mixing" residual term
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

POOLFORMER_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 58 | 1 |
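# Aside: the classification head above infers `problem_type` from the label
# tensor -- one float target per example -> regression (MSE); integer class
# ids -> single-label cross-entropy; anything else -> multi-label BCE.
# Condensed as a standalone sketch (hypothetical helper mirroring the logic
# above, not a fixed API):
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(num_labels, labels):
    if num_labels == 1:
        return "regression", MSELoss()
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification", CrossEntropyLoss()
    return "multi_label_classification", BCEWithLogitsLoss()

# pick_loss(3, torch.tensor([0, 2, 1]))[0] -> 'single_label_classification'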
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab_tokens = {}
        for i, token in enumerate(vocab):
            vocab_tokens[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab_tokens, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def test_is_control(self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def test_is_punctuation(self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def test_clean_text(self ):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def test_sequence_builders(self ):
tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
text = tokenizer.encode("sequence builders" , add_special_tokens=False )
text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def test_offsets_with_special_characters(self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def test_change_tokenize_chinese_chars(self ):
list_of_commun_chinese_char = ["的", "人", "有"]
text_with_chinese_char = "".join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
kwargs["tokenize_chinese_chars"] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
kwargs["tokenize_chinese_chars"] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 23 |
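All of the WordPiece assertions above exercise the same greedy longest-match-first algorithm. For reference, here is a minimal sketch of that algorithm (an illustrative reimplementation, not the transformers WordpieceTokenizer itself), using the same "##" continuation convention as the fixtures above:

def wordpiece_tokenize(word: str, vocab: set, unk_token: str = "[UNK]") -> list:
    # Greedy longest-match-first: repeatedly take the longest vocab entry that
    # matches at the current position; non-initial pieces get a "##" prefix.
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        cur_substr = None
        while start < end:
            substr = word[start:end]
            if start > 0:
                substr = "##" + substr
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no piece matched: the whole word becomes [UNK]
        tokens.append(cur_substr)
        start = end
    return tokens

assert wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]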
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase ):
def get_tokenizer(self , mname ):
return FSMTTokenizer.from_pretrained(mname )
def get_model(self , mname ):
model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def test_bleu_scores(self , pair , min_bleu_score ):
mname = F"facebook/wmt19-{pair}"
tokenizer = self.get_tokenizer(mname )
model = self.get_model(mname )
src_sentences = bleu_data[pair]["src"]
tgt_sentences = bleu_data[pair]["tgt"]
batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
outputs = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
decoded_sentences = tokenizer.batch_decode(
outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
scores = calculate_bleu(decoded_sentences , tgt_sentences )
print(scores )
self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 68 | 0 |
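calculate_bleu above is imported from a local utils module that is not part of this snippet. A plausible minimal implementation on top of sacrebleu (an assumption about what the helper wraps, not the verbatim code):

from sacrebleu import corpus_bleu

def calculate_bleu(output_lns, refs_lns):
    # sacrebleu expects a list of hypothesis strings and a list of reference streams
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}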
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368 |
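For context, the _LazyModule registration above keeps importing transformers cheap: submodules are only loaded on first attribute access. A minimal usage sketch (assuming a transformers install with torch available):

from transformers import ViTMAEConfig   # resolves only the configuration submodule
from transformers import ViTMAEModel    # this access triggers the lazy import of modeling_vit_mae

model = ViTMAEModel(ViTMAEConfig())     # randomly initialised ViT-MAE model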
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig ):
model_type = "xmod"
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
self.pre_norm = pre_norm
self.adapter_reduction_factor = adapter_reduction_factor
self.adapter_layer_norm = adapter_layer_norm
self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
self.ln_before_adapter = ln_before_adapter
self.languages = list(languages )
self.default_language = default_language
class XmodOnnxConfig(OnnxConfig ):
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 116 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"
_logger = None
def logger():
global _logger
_logger = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError ):
"""Raised when the lock could not be acquired in `timeout` seconds."""
def __init__( self , lock_file ):
self.lock_file = lock_file
return None
def __str__( self ):
temp = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class _Acquire_ReturnProxy:
"""Context-manager proxy returned by BaseFileLock.acquire()."""
def __init__( self , lock ):
self.lock = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , exc_type , exc_value , traceback ):
self.lock.release()
return None
class BaseFileLock:
"""Implements the base class of a file lock."""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
max_filename_length = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
# The path to the lock file.
self._lock_file = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
self._lock_file_fd = None
# The default timeout value.
self.timeout = timeout
# We use this lock primarily for the lock counter.
self._thread_lock = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
self._lock_counter = 0
return None
@property
def lock_file( self ):
return self._lock_file
@property
def timeout( self ):
return self._timeout
@timeout.setter
def timeout( self , value ):
self._timeout = float(value )
return None
def _acquire( self ):
raise NotImplementedError()
def _release( self ):
raise NotImplementedError()
@property
def is_locked( self ):
return self._lock_file_fd is not None
def acquire( self , timeout=None , poll_intervall=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lock_id = id(self )
lock_filename = self._lock_file
start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def release( self , force=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lock_id = id(self )
lock_filename = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
self._lock_counter = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , exc_type , exc_value , traceback ):
self.release()
return None
def __del__( self ):
self.release(force=True )
return None
def hash_filename_if_too_long( self , path , max_length ):
filename = os.path.basename(path )
if len(filename ) > max_length and max_length > 0:
dirname = os.path.dirname(path )
hashed_filename = str(hash(filename ) )
filename = filename[: max_length - len(hashed_filename ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(dirname , filename )
else:
return path
class WindowsFileLock(BaseFileLock ):
"""Uses the msvcrt.locking function to hard lock the lock file on Windows systems."""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
from .file_utils import relative_to_absolute_path
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def _acquire( self ):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
try:
msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release( self ):
fd = self._lock_file_fd
self._lock_file_fd = None
msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock(BaseFileLock ):
"""Uses the fcntl.flock function to hard lock the lock file on unix systems."""
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
def _acquire( self ):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
fd = os.open(self._lock_file , open_mode )
try:
fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(fd )
else:
self._lock_file_fd = fd
return None
def _release( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
fd = self._lock_file_fd
self._lock_file_fd = None
fcntl.flock(fd , fcntl.LOCK_UN )
os.close(fd )
return None
class SoftFileLock(BaseFileLock ):
"""Simply watches the existence of the lock file."""
def _acquire( self ):
open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
fd = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
self._lock_file_fd = fd
return None
def _release( self ):
os.close(self._lock_file_fd )
self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
| 113 |
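A minimal usage sketch of the API defined above (FileLock resolves to the platform-appropriate class): the lock is re-entrant per object and doubles as a context manager.

lock = FileLock("my_resource.txt.lock", timeout=5)
try:
    with lock:      # acquire(); raises Timeout if the lock is not obtained within 5 seconds
        with lock:  # nested acquire on the same object only bumps the internal counter
            pass    # ... exclusive access to my_resource.txt ...
except Timeout:
    print("another process is holding the lock")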
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 113 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest ):
scheduler_classes = (DDPMParallelScheduler,)
def get_scheduler_config( self , **kwargs ):
config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**kwargs )
return config
def test_timesteps( self ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def test_variance_type( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def test_clip_sample( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def test_thresholding( self ):
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def test_prediction_type( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_time_indices( self ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=t )
def test_variance( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def test_batch_step_no_noise( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3] , dim=0 )
timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
result_sum = torch.sum(torch.abs(pred_prev_sample ) )
result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def test_full_loop_no_noise( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def test_full_loop_with_v_prediction( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def test_previous_timestep( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=timesteps )
timesteps = scheduler.timesteps
for i, timestep in enumerate(timesteps ):
if i == len(timesteps ) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep )
prev_t = prev_t.item()
self.assertEqual(prev_t , expected_prev_t )
def test_custom_timesteps_increasing_order( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [1_0_0, 8_7, 5_0, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=timesteps )
| 363 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module ):
def __init__( self ):
super().__init__()
self.linear1 = nn.Linear(3 , 4 )
self.batchnorm = nn.BatchNorm1d(4 )
self.linear2 = nn.Linear(4 , 5 )
def forward( self , x ):
return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
def test_memory_implicit( self ):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
def test_memory_explicit( self ):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size , arga ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
bs, arga = mock_training_loop_function('hello' )
self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def test_start_zero( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(batch_size ):
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def test_approach_zero( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def test_verbose_guard( self ):
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size , arga , argb ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def test_any_other_error( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(batch_size ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def test_release_memory( self ):
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
model = release_memory(model )
self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 162 | 0 |
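The decorator exercised above is normally wrapped around a whole training function; a sketch of that pattern (the body of train is a placeholder, not accelerate API):

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # build dataloaders from batch_size here; on a CUDA OOM the decorator frees
    # memory and retries with the batch size halved, until it fits or reaches zero
    ...

train()  # called without arguments: the decorator injects the current batch_size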
def or_gate(input_1: int , input_2: int ) -> int:
"""OR gate: returns 1 if at least one input is 1, else 0."""
return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
"""Exhaustively checks the OR-gate truth table."""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 307 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = CycleDiffusionPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
image = image / 2 + 0.5
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def test_stable_diffusion_cycle(self ):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = CycleDiffusionPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = pipe(**inputs )
images = output.images
image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def test_save_load_local(self ):
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def test_inference_batch_single_identical(self ):
return super().test_inference_batch_single_identical()
@skip_mps
def test_dict_tuple_outputs_equivalent(self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def test_save_load_optional_components(self ):
return super().test_save_load_optional_components()
@skip_mps
def test_attention_slicing_forward_pass(self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_cycle_diffusion_pipeline_fp16(self ):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
init_image = init_image.resize((512, 512) )
model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="scheduler" )
pipe = CycleDiffusionPipeline.from_pretrained(
model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision="fp16" )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
source_prompt = "A black colored car"
prompt = "A blue colored car"
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="np" , )
image = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def test_cycle_diffusion_pipeline(self ):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
init_image = init_image.resize((512, 512) )
model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="scheduler" )
pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
source_prompt = "A black colored car"
prompt = "A blue colored car"
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="np" , )
image = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 258 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_lowerCamelCase = """CompVis/stable-diffusion-v1-1"""
_lowerCamelCase = """CompVis/stable-diffusion-v1-2"""
_lowerCamelCase = """CompVis/stable-diffusion-v1-3"""
_lowerCamelCase = """CompVis/stable-diffusion-v1-4"""
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = True ,):
super()._init_()
UpperCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(_snake_case )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(_snake_case )
UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(_snake_case )
UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
vae=_snake_case ,text_encoder=_snake_case ,tokenizer=_snake_case ,unet=_snake_case ,scheduler=_snake_case ,safety_checker=_snake_case ,feature_extractor=_snake_case ,requires_safety_checker=_snake_case ,)
self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea )
@property
def components( self ):
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
def enable_attention_slicing( self , slice_size = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ):
self.enable_attention_slicing(None )
@torch.no_grad()
def text2img_sd1_1( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
return self.pipe1(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
def text2img_sd1_2( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
return self.pipe2(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
def text2img_sd1_3( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
return self.pipe3(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
def text2img_sd1_4( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
return self.pipe4(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
device = "cuda" if torch.cuda.is_available() else "cpu"
self.to(device )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
res1 = self.text2img_sd1_1(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
# Get first result from Stable Diffusion Checkpoint v1.2
res2 = self.text2img_sd1_2(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
# Get first result from Stable Diffusion Checkpoint v1.3
res3 = self.text2img_sd1_3(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
# Get first result from Stable Diffusion Checkpoint v1.4
res4 = self.text2img_sd1_4(
prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 67 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "" ) -> bool:
"""Determine, via collections.Counter, if the characters of input_str can be rearranged into a palindrome."""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "" ) -> bool:
"""Determine, via a handwritten frequency dict, if the characters of input_str can be rearranged into a palindrome."""
if len(input_str ) == 0:
return True
lower_case_input_str = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
character_freq_dict: dict[str, int] = {}
for character in lower_case_input_str:
character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
odd_char = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def benchmark(input_str: str = "" ) -> None:
"""Benchmark the two implementations against each other."""
print("\nFor string = " , input_str , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(input_str ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(input_str ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
check_str = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 67 | 1 |
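A quick sanity check of the Counter-based variant: a string can be rearranged into a palindrome exactly when at most one character has an odd count.

from collections import Counter

assert sum(c % 2 for c in Counter("tacocat").values()) < 2    # only 'o' is odd -> possible
assert not (sum(c % 2 for c in Counter("abc").values()) < 2)  # 'a', 'b', 'c' all odd -> impossible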
def fizz_buzz(number: int , iterations: int ) -> str:
"""Play FizzBuzz starting at `number` for `iterations` rounds and return the output."""
if not isinstance(iterations , int ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(number , int ) or not number >= 1:
raise ValueError(
"starting number must be an integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
from collections import defaultdict
def dfs(start: int ) -> int:
"""Return the size of the subtree rooted at `start`; roots of even-sized subtrees are recorded in `cuts`."""
ret = 1
visited[start] = True
for v in tree[start]:
if v not in visited:
ret += dfs(v )
if ret % 2 == 0:
cuts.append(start )
return ret
def even_tree() -> None:
"""Count removable edges: each even-sized subtree below the root can be cut off."""
dfs(1 )
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 340 | 1 |
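Worked trace of the sample graph above: dfs(1) assigns subtree sizes {4: 1, 5: 1, 7: 1, 9: 1, 10: 1, 3: 2, 2: 3, 8: 3, 6: 4, 1: 10}, so cuts collects the even-sized subtree roots [3, 6, 1] (the whole tree included), and the printed answer is len(cuts) - 1 == 2: removing edges (1, 3) and (1, 6) leaves three even-sized forests.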
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor ):
def __init__( self , *args , **kwargs ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , FutureWarning , )
super().__init__(*args , **kwargs )
| 241 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
'''simple docstring'''
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class lowercase__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , "rb" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
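# Standalone sketch of the read-retry idea used in _generate_tables above
# (an illustrative helper, not part of the original module):
def _read_json_lines_sketch(raw: bytes, block_size: int = 16 << 10) -> pa.Table:
    # pyarrow fails when a JSON record straddles a block boundary; doubling the
    # block size until the whole payload fits lets the parse succeed.
    while True:
        try:
            return paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(raw):
                raise
            block_size *= 2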
| 241 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro( TestCasePlus ):
    """simple docstring"""
    def setUp(self ):
        super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=True , )
        self.data_dir = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self ):
        env_vars_to_replace = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
        bash_script = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
        bash_script = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["""finetune.py"""] + bash_script.split() + args
        with patch.object(sys , """argv""" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["""val"""][0]
        last_step_stats = metrics["""val"""][-1]
        self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , float )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(""".ckpt""" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="""cpu""" )
        expected_key = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
"""simple docstring"""
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self ):
        data_dir = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 1_28,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
        )
        bash_script = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        bash_script = bash_script.replace("""--fp16 """ , """ """ )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("""--fp16""" , """""" )
        epochs = 6
        testargs = (
            ["""distillation.py"""]
            + bash_script.split()
            + [
                F"--output_dir={output_dir}",
                """--gpus=1""",
                """--learning_rate=1e-3""",
                F"--num_train_epochs={epochs}",
                """--warmup_steps=10""",
                """--val_check_interval=1.0""",
                """--do_predict""",
            ]
        )
        with patch.object(sys , """argv""" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["""val"""][0]
        last_step_stats = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , SCREAMING_SNAKE_CASE_ )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(""".ckpt""" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="""cpu""" )
        expected_key = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
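# The recurring pattern in both tests above, in isolation (names here are
# hypothetical, for illustration only): substitute the $VARS of a shell-script
# template, then run a CLI entry point in-process by patching sys.argv. The
# sketch assumes main_fn parses sys.argv itself.
def _run_cli_in_process(main_fn, script_template: str, substitutions: dict):
    for key, value in substitutions.items():
        script_template = script_template.replace(key, str(value))
    with patch.object(sys, "argv", ["prog.py"] + script_template.split()):
        return main_fn()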
| 244 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('''T''')
class DisjointSetTreeNode( Generic[T] ):
    """simple docstring"""
    def __init__(self , data: T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree( Generic[T] ):
    """simple docstring"""
    def __init__(self ) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set(self , data: T ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set(self , data: T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link(self , node1: DisjointSetTreeNode[T] , node2: DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union(self , data1: T , data2: T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted( Generic[T] ):
    """simple docstring"""
    def __init__(self ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node(self , node: T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self , node1: T , node2: T , weight: int ) -> None:
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
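# Example use of the classes above (added for illustration):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    # The MST keeps the two cheapest edges (1-2 and 2-3) and drops 1-3.
    assert mst.connections[1] == {2: 1}
    assert mst.connections[3] == {2: 2}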
| 244 | 1 |
def reverse_long_words(sentence: str ) -> str:
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
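    # Quick sanity check: only words longer than four characters get reversed.
    assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"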
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor ):
    """simple docstring"""
    def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        warnings.warn(
            '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 2 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
    """simple docstring"""
    def _parse_labels(self , labels ):
        """simple docstring"""
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        """simple docstring"""
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
    """simple docstring"""
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        """simple docstring"""
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
    @property
    def entailment_id(self ):
        """simple docstring"""
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail" ):
                return ind
        return -1
    def _parse_and_tokenize(self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self , sequences , *args , **kwargs , ):
        """simple docstring"""
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
    def preprocess(self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        """simple docstring"""
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward(self , inputs ):
        """simple docstring"""
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self , model_outputs , multi_label=False ):
        """simple docstring"""
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
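# Typical use of the pipeline above, for reference (the checkpoint name is
# illustrative -- any NLI model with an "entailment" label works):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("I love this movie", candidate_labels=["positive", "negative"])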
| 227 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__(self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , batch_size: int = 1 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
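# Sketch of how a pipeline like the one above is typically driven (the
# component classes are illustrative; any compatible UNet/scheduler pair works):
#
#     from diffusers import UNet2DModel, DDPMScheduler
#     pipe = _lowercase(unet=UNet2DModel(), scheduler=DDPMScheduler())
#     images = pipe(batch_size=1, num_inference_steps=10).images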
| 227 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str ) -> None:
    iam_client = boto3.client('iam' )
    trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(trust_policy , indent=2 ) )
        access_policy = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F'{role_name}_policy_permission' , PolicyDocument=json.dumps(access_policy , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def _get_iam_role_arn(role_name: str ) -> str:
    iam_client = boto3.client('iam' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
SCREAMING_SNAKE_CASE = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE = None
if credentials_configuration == 0:
SCREAMING_SNAKE_CASE = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
SCREAMING_SNAKE_CASE = aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
SCREAMING_SNAKE_CASE = _ask_field('AWS Access Key ID: ' )
SCREAMING_SNAKE_CASE = aws_access_key_id
SCREAMING_SNAKE_CASE = _ask_field('AWS Secret Access Key: ' )
SCREAMING_SNAKE_CASE = aws_secret_access_key
SCREAMING_SNAKE_CASE = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
SCREAMING_SNAKE_CASE = aws_region
SCREAMING_SNAKE_CASE = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , SCREAMING_SNAKE_CASE_ , )
if role_management == 0:
SCREAMING_SNAKE_CASE = _ask_field('Enter your IAM role name: ' )
else:
SCREAMING_SNAKE_CASE = 'accelerate_sagemaker_execution_role'
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE = None
if is_custom_docker_image:
SCREAMING_SNAKE_CASE = _ask_field('Enter your Docker image: ' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE = None
if is_sagemaker_inputs_enabled:
SCREAMING_SNAKE_CASE = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE = None
if is_sagemaker_metrics_enabled:
SCREAMING_SNAKE_CASE = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , )
SCREAMING_SNAKE_CASE = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
if use_dynamo:
SCREAMING_SNAKE_CASE = 'dynamo_'
SCREAMING_SNAKE_CASE = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
if use_custom_options:
SCREAMING_SNAKE_CASE = _ask_options(
'Which mode do you want to use?' , SCREAMING_SNAKE_CASE_ , lambda SCREAMING_SNAKE_CASE_ : TORCH_DYNAMO_MODES[int(SCREAMING_SNAKE_CASE_ )] , default='default' , )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=SCREAMING_SNAKE_CASE_ , error_message='Please enter yes or no.' , )
SCREAMING_SNAKE_CASE = 'Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
SCREAMING_SNAKE_CASE = _ask_options(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lambda SCREAMING_SNAKE_CASE_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(SCREAMING_SNAKE_CASE_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
SCREAMING_SNAKE_CASE = _ask_field(SCREAMING_SNAKE_CASE_ , lambda SCREAMING_SNAKE_CASE_ : str(SCREAMING_SNAKE_CASE_ ).lower() , default='ml.p3.2xlarge' )
SCREAMING_SNAKE_CASE = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
SCREAMING_SNAKE_CASE = _ask_field(
'How many machines do you want use? [1]: ' , SCREAMING_SNAKE_CASE_ , default=1 , )
SCREAMING_SNAKE_CASE = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
        image_uri=SCREAMING_SNAKE_CASE_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=SCREAMING_SNAKE_CASE_ , use_cpu=SCREAMING_SNAKE_CASE_ , dynamo_config=SCREAMING_SNAKE_CASE_ , ec2_instance_type=SCREAMING_SNAKE_CASE_ , profile=SCREAMING_SNAKE_CASE_ , region=SCREAMING_SNAKE_CASE_ , iam_role_name=SCREAMING_SNAKE_CASE_ , mixed_precision=SCREAMING_SNAKE_CASE_ , num_machines=SCREAMING_SNAKE_CASE_ , sagemaker_inputs_file=SCREAMING_SNAKE_CASE_ , sagemaker_metrics_file=SCREAMING_SNAKE_CASE_ , )
| 38 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='cpu' )['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.' , '.q_proj.' )
            k_name = key.replace('.qkv_proj.' , '.k_proj.' )
            v_name = key.replace('.qkv_proj.' , '.v_proj.' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE = load_checkpoint(SCREAMING_SNAKE_CASE_ )
if config is not None:
SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE = OPTConfig()
SCREAMING_SNAKE_CASE = OPTModel(SCREAMING_SNAKE_CASE_ ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check results
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
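    # Example invocation (paths are illustrative):
    #   python convert_opt_checkpoint.py \
    #       --fairseq_path /path/to/restored.pt \
    #       --pytorch_dump_folder_path ./opt-converted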
| 38 | 1 |
def solution(limit: int = 1_000_000 ) -> int:
    '''simple docstring'''
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
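    # For reference, small limits are easy to verify by hand:
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21, and solution(8) returns 21.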
| 65 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a , mat_b , mat_c , pseudo_inv=None ):
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            F"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            F"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
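# The identity exercised by the tests below: for the block matrix
# X = [[A, B], [B.T, C]], det(X) = det(A) * det(S), where
# S = C - B.T @ A^{-1} @ B is the Schur complement of A in X.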
class TestSchurComplement( unittest.TestCase ):
    def test_schur_complement( self : int ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions( self : int ) -> None:
        # A has 2 rows while B has 3, so the row-count check must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def test_improper_b_c_dimensions( self : List[Any] ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 294 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'ibert'
    def __init__( self : int , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-12 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , position_embedding_type: str = "absolute" , quant_mode: bool = False , force_dequant: str = "none" , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self : int ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 367 | import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__( self : Optional[int] , questions , titles: Optional[str] = None , texts: Optional[str] = None , padding: Union[bool, str] = False , truncation: Union[bool, str] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str) else [titles]
        texts = texts if not isinstance(texts , str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions , str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False)['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors)
    def decode_best_spans( self : Union[str, Any] , reader_input: BatchEncoding , reader_output: DPRReaderOutput , num_spans: int = 16 , max_answer_length: int = 64 , num_spans_per_passage: int = 4 , ):
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages) , reverse=True , key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self : Optional[int] , start_logits , end_logits , max_answer_length: int , top_spans: int , ):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores , key=lambda x: x[1] , reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
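# Sketch of how the reader tokenizer above is typically driven (the question
# and passage strings are illustrative):
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     # A DPRReader model turns encoded_inputs into (start_logits, end_logits,
#     # relevance_logits), which decode_best_spans() converts into text spans.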
| 119 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
assert len(str(SCREAMING_SNAKE_CASE_ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
lowerCamelCase = year // 100
lowerCamelCase = (5 * (century % 4) + 2) % 7
lowerCamelCase = year % 100
lowerCamelCase = centurian % 12
lowerCamelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowerCamelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
lowerCamelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 252 |
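For reference, here is a cleaned-up, runnable version of the Doomsday computation above with the obfuscated names restored, cross-checked against the standard library. The sample dates are illustrative; `datetime` uses the proleptic Gregorian calendar, which matches the algorithm for modern dates.

from datetime import date

WEEK_DAY_NAMES = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]

def doomsday_week_day(year: int, month: int, day: int) -> str:
    century_anchor = (5 * ((year // 100) % 4) + 2) % 7   # anchor day of the century
    centurian = year % 100
    dooms_day = (
        (centurian // 12) + (centurian % 12) + ((centurian % 12) // 4) + century_anchor
    ) % 7                                                # weekday of this year's doomsday
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    day_anchor = (DOOMSDAY_LEAP if is_leap else DOOMSDAY_NOT_LEAP)[month - 1]
    return WEEK_DAY_NAMES[(dooms_day + day - day_anchor) % 7]

if __name__ == "__main__":
    for y, m, d in [(2000, 1, 1), (1999, 12, 31), (2023, 6, 15)]:
        expected = WEEK_DAY_NAMES[(date(y, m, d).weekday() + 1) % 7]  # datetime: Monday == 0
        assert doomsday_week_day(y, m, d) == expected, (y, m, d)
    print("Doomsday result matches datetime on the sample dates")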
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( snake_case__ , unittest.TestCase ):
_a : Dict = KandinskyImgaImgPipeline
_a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
_a : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_a : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_a : int = False
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 1_0_0
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__lowerCAmelCase = MultilingualCLIP(_A )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowerCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**_A )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
| 92 | 0 |
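The fast test above pins only a 3x3 corner slice of the generated image against hard-coded values with a loose tolerance, which keeps the test stable across minor numerical drift. A minimal sketch of that assertion pattern; the helper name, shapes, and values are illustrative.

import numpy as np

def assert_slice_close(image, expected_slice, atol=1e-2):
    # image is (batch, height, width, channels); pin the bottom-right 3x3 of the last channel
    actual = image[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(actual - np.asarray(expected_slice).flatten()).max()
    assert max_diff < atol, f"max abs diff {max_diff} exceeds {atol}"

rng = np.random.default_rng(0)
image = rng.random((1, 64, 64, 3)).astype(np.float32)
assert_slice_close(image, image[0, -3:, -3:, -1])  # trivially passes against itself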
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a : Union[str, Any] = False
class __A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Optional[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : int = pipe.dual_guided(
prompt="""first prompt""" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_lowerCAmelCase : Dict = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = generator.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe.dual_guided(
prompt="""first prompt""" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __A ( self ):
_lowerCAmelCase : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = """cyberpunk 2077"""
_lowerCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger """
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : int = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_lowerCAmelCase : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCAmelCase : List[str] = pipe.image_variation(a__ , generator=a__ , output_type="""numpy""" ).images
_lowerCAmelCase : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : int = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
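The first test above is a save/reload determinism check: run once, `save_pretrained` to a temp dir, reload, rerun with the same seed, and require near-identical outputs. A toy version of the same pattern with a plain torch module; the module, paths, and tolerance are illustrative stand-ins.

import tempfile
import torch

model = torch.nn.Linear(4, 4)
x = torch.randn(2, 4, generator=torch.Generator().manual_seed(0))
with torch.no_grad():
    y_before = model(x)

with tempfile.TemporaryDirectory() as tmpdir:
    path = f"{tmpdir}/model.pt"
    torch.save(model.state_dict(), path)          # stand-in for save_pretrained
    reloaded = torch.nn.Linear(4, 4)
    reloaded.load_state_dict(torch.load(path))    # stand-in for from_pretrained

with torch.no_grad():
    y_after = reloaded(x)
assert torch.allclose(y_before, y_after, atol=1e-5), "Models don't have the same forward pass"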
| 355 | """simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100 ) -> int:
_lowerCAmelCase : Optional[Any] = n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase : Tuple = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 126 | 0 |
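The snippet above is Project Euler problem 6: it returns the difference between the square of the sum and the sum of squares of 1..n using closed forms. A brute-force cross-check of the same quantity (the helper name is illustrative):

def solution_bruteforce(n=100):
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares

assert solution_bruteforce(10) == 2640   # the worked example from the problem statement
print(solution_bruteforce())             # 25164150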
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE_: Dict =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
SCREAMING_SNAKE_CASE_: Optional[Any] =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
SCREAMING_SNAKE_CASE_: Any =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
a__ : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the training data."""} )
a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """A folder containing the validation data."""} )
a__ : Optional[float] = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
a__ : int = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} )
a__ : float = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _lowercase (self : Any ):
UpperCAmelCase_ = {}
if self.train_dir is not None:
UpperCAmelCase_ = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase_ = self.validation_dir
UpperCAmelCase_ = data_files if data_files else None
@dataclass
class __A :
a__ : str = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__ )} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
a__ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a__ : str = field(default=UpperCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
a__ : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
a__ : Optional[int] = field(
default=UpperCamelCase__ , metadata={"""help""": """Stride to use for the encoder."""} , )
class __A :
def __init__(self : int , __a : Union[str, Any]=192 , __a : str=32 , __a : List[Any]=4 , __a : Any=0.6 ):
UpperCAmelCase_ = input_size
UpperCAmelCase_ = mask_patch_size
UpperCAmelCase_ = model_patch_size
UpperCAmelCase_ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
UpperCAmelCase_ = self.input_size // self.mask_patch_size
UpperCAmelCase_ = self.mask_patch_size // self.model_patch_size
UpperCAmelCase_ = self.rand_size**2
UpperCAmelCase_ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__(self : Dict ):
UpperCAmelCase_ = np.random.permutation(self.token_count )[: self.mask_count]
UpperCAmelCase_ = np.zeros(self.token_count , dtype=__a )
UpperCAmelCase_ = 1
UpperCAmelCase_ = mask.reshape((self.rand_size, self.rand_size) )
UpperCAmelCase_ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = torch.stack([example["pixel_values"] for example in examples] )
UpperCAmelCase_ = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
UpperCAmelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCAmelCase_ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
UpperCAmelCase_ = ds["train"].train_test_split(data_args.train_val_split )
UpperCAmelCase_ = split["train"]
UpperCAmelCase_ = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case_ )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
UpperCAmelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(snake_case_ , "decoder_type" ):
UpperCAmelCase_ = "simmim"
# adapt config
UpperCAmelCase_ = model_args.image_size if model_args.image_size is not None else config.image_size
UpperCAmelCase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
UpperCAmelCase_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
UpperCAmelCase_ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
UpperCAmelCase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
UpperCAmelCase_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
UpperCAmelCase_ = AutoModelForMaskedImageModeling.from_config(snake_case_ )
if training_args.do_train:
UpperCAmelCase_ = ds["train"].column_names
else:
UpperCAmelCase_ = ds["validation"].column_names
if data_args.image_column_name is not None:
UpperCAmelCase_ = data_args.image_column_name
elif "image" in column_names:
UpperCAmelCase_ = "image"
elif "img" in column_names:
UpperCAmelCase_ = "img"
else:
UpperCAmelCase_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
UpperCAmelCase_ = Compose(
[
Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
UpperCAmelCase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(snake_case_ : List[str] ):
UpperCAmelCase_ = [transforms(snake_case_ ) for image in examples[image_column_name]]
UpperCAmelCase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
UpperCAmelCase_ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
UpperCAmelCase_ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case_ )
# Initialize our trainer
UpperCAmelCase_ = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
UpperCAmelCase_ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ = last_checkpoint
UpperCAmelCase_ = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase_ = trainer.evaluate()
trainer.log_metrics("eval" , snake_case_ )
trainer.save_metrics("eval" , snake_case_ )
# Write model card and (optionally) push to hub
UpperCAmelCase_ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main()
| 1 |
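The most algorithmic piece of the training script above is the SimMIM-style `MaskGenerator`: choose random mask patches on a coarse grid, then expand each one to the model's finer patch grid. A standalone sketch using the class's default sizes (the function name is illustrative):

import numpy as np

def make_mask(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6, seed=0):
    rand_size = input_size // mask_patch_size        # coarse mask grid: 6 x 6
    scale = mask_patch_size // model_patch_size      # each mask cell covers 8 x 8 model patches
    token_count = rand_size ** 2
    mask_count = int(np.ceil(token_count * mask_ratio))

    rng = np.random.default_rng(seed)
    mask = np.zeros(token_count, dtype=np.int64)
    mask[rng.permutation(token_count)[:mask_count]] = 1
    mask = mask.reshape(rand_size, rand_size)
    # expand each coarse cell so the mask aligns with the model's patch grid
    return mask.repeat(scale, axis=0).repeat(scale, axis=1)

mask = make_mask()
assert mask.shape == (192 // 4, 192 // 4)
print("masked fraction:", mask.mean())  # about 0.61 with the defaults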
"""simple docstring"""
__magic_name__ = "Tobias Carryer"
from time import time
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=int(time())): # noqa: B008
__SCREAMING_SNAKE_CASE = multiplier
__SCREAMING_SNAKE_CASE = increment
__SCREAMING_SNAKE_CASE = modulo
__SCREAMING_SNAKE_CASE = seed
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__magic_name__ = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 100 | 0 |
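A compact, self-contained rewrite of the generator above with the obfuscated names restored. The multiplier/increment constants are the same Numerical Recipes values as in the demo, and `2 ** 32` equals the demo's `2 << 31`; resolving the time-based seed inside `__init__` also avoids the mutable-default pitfall the original suppresses with `noqa: B008`.

from time import time

class LinearCongruentialGenerator:
    """x_{n+1} = (multiplier * x_n + increment) % modulo"""

    def __init__(self, multiplier, increment, modulo, seed=None):
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 ** 32, seed=0)
print([lcg.next_number() for _ in range(3)])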
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = """"""
for i in table:
res += inp[i - 1]
return res
def __UpperCAmelCase ( snake_case_ : str ) -> Tuple:
"""simple docstring"""
return data[1:] + data[0]
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> Any:
"""simple docstring"""
_lowerCAmelCase = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : str ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = int("""0b""" + data[0] + data[-1] , 2 )
_lowerCAmelCase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Dict ) -> str:
"""simple docstring"""
_lowerCAmelCase = message[:4]
_lowerCAmelCase = message[4:]
_lowerCAmelCase = apply_table(snake_case_ , snake_case_ )
_lowerCAmelCase = xor(snake_case_ , snake_case_ )
_lowerCAmelCase = apply_sbox(snake_case_ , temp[:4] ) # noqa: E741
_lowerCAmelCase = apply_sbox(snake_case_ , temp[4:] )
_lowerCAmelCase = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_lowerCAmelCase = """0""" * (2 - len(snake_case_ )) + r
_lowerCAmelCase = apply_table(l + r , snake_case_ )
_lowerCAmelCase = xor(snake_case_ , snake_case_ )
return temp + right
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = input('''Enter 10 bit key: ''')
SCREAMING_SNAKE_CASE : Optional[int] = input('''Enter 8 bit message: ''')
SCREAMING_SNAKE_CASE : str = [6, 3, 7, 4, 8, 5, 1_0, 9]
SCREAMING_SNAKE_CASE : Optional[Any] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
SCREAMING_SNAKE_CASE : str = [2, 4, 3, 1]
SCREAMING_SNAKE_CASE : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
SCREAMING_SNAKE_CASE : List[str] = [4, 1, 3, 5, 7, 2, 8, 6]
SCREAMING_SNAKE_CASE : Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1]
SCREAMING_SNAKE_CASE : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
SCREAMING_SNAKE_CASE : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
SCREAMING_SNAKE_CASE : Dict = apply_table(key, paa_table)
SCREAMING_SNAKE_CASE : Optional[int] = temp[:5]
SCREAMING_SNAKE_CASE : str = temp[5:]
SCREAMING_SNAKE_CASE : List[Any] = left_shift(left)
SCREAMING_SNAKE_CASE : int = left_shift(right)
SCREAMING_SNAKE_CASE : Optional[Any] = apply_table(left + right, pa_table)
SCREAMING_SNAKE_CASE : int = left_shift(left)
SCREAMING_SNAKE_CASE : Dict = left_shift(right)
SCREAMING_SNAKE_CASE : int = left_shift(left)
SCREAMING_SNAKE_CASE : str = left_shift(right)
SCREAMING_SNAKE_CASE : Union[str, Any] = apply_table(left + right, pa_table)
# encryption
SCREAMING_SNAKE_CASE : int = apply_table(message, IP)
SCREAMING_SNAKE_CASE : Optional[int] = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE : int = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE : Tuple = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE : List[str] = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
SCREAMING_SNAKE_CASE : Dict = apply_table(CT, IP)
SCREAMING_SNAKE_CASE : Optional[int] = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE : Optional[Any] = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE : Tuple = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE : Optional[Any] = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
| 317 |
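Cleaned-up, runnable versions of the three bit-string helpers the simplified-DES demo above relies on (permutation tables are 1-indexed, as in the demo; the sample inputs are illustrative):

def apply_table(inp, table):
    """Select/permute bits of `inp` according to 1-indexed positions in `table`."""
    return "".join(inp[i - 1] for i in table)

def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]

def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

assert apply_table("10100000", [2, 6, 3, 1, 4, 8, 5, 7]) == "00110000"
assert left_shift("10000") == "00001"
assert xor("1010", "0110") == "1100"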
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317 | 1 |
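The `_LazyModule` assignment above defers the heavy `modeling_focalnet` import until an attribute is first accessed. A minimal sketch of that lazy-import mechanism using only the standard library; `LazyModule` and the `math` mapping here are illustrative, not the transformers implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that actually defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy_math = LazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(4.0), lazy_math.pi)  # math is imported only at this point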
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :Dict = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''wavlm'''
def __init__( self , A=3_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.1 , A=0.02 , A=1E-5 , A="group" , A="gelu" , A=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A=(5, 2, 2, 2, 2, 2, 2) , A=(1_0, 3, 3, 3, 3, 2, 2) , A=False , A=1_2_8 , A=1_6 , A=3_2_0 , A=8_0_0 , A=False , A=True , A=0.05 , A=1_0 , A=2 , A=0.0 , A=1_0 , A=3_2_0 , A=2 , A=0.1 , A=1_0_0 , A=2_5_6 , A=2_5_6 , A=0.1 , A="mean" , A=False , A=False , A=2_5_6 , A=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , A=(5, 3, 3, 1, 1) , A=(1, 2, 3, 1, 1) , A=5_1_2 , A=8_0 , A=0 , A=1 , A=2 , A=False , A=3 , A=2 , A=3 , A=None , **A , ) -> List[str]:
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = feat_extract_norm
_UpperCAmelCase : Optional[int] = feat_extract_activation
_UpperCAmelCase : Optional[int] = list(A )
_UpperCAmelCase : Tuple = list(A )
_UpperCAmelCase : Optional[Any] = list(A )
_UpperCAmelCase : Optional[Any] = conv_bias
_UpperCAmelCase : Union[str, Any] = num_buckets
_UpperCAmelCase : Optional[Any] = max_bucket_distance
_UpperCAmelCase : List[str] = num_conv_pos_embeddings
_UpperCAmelCase : Tuple = num_conv_pos_embedding_groups
_UpperCAmelCase : str = len(self.conv_dim )
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : List[Any] = activation_dropout
_UpperCAmelCase : Optional[Any] = feat_proj_dropout
_UpperCAmelCase : Union[str, Any] = final_dropout
_UpperCAmelCase : Optional[Any] = layerdrop
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Tuple = num_ctc_classes
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Tuple = do_stable_layer_norm
_UpperCAmelCase : int = use_weighted_layer_sum
_UpperCAmelCase : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : str = apply_spec_augment
_UpperCAmelCase : Union[str, Any] = mask_time_prob
_UpperCAmelCase : Optional[Any] = mask_time_length
_UpperCAmelCase : str = mask_time_min_masks
_UpperCAmelCase : List[Any] = mask_feature_prob
_UpperCAmelCase : int = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : Any = num_codevectors_per_group
_UpperCAmelCase : Dict = num_codevector_groups
_UpperCAmelCase : List[Any] = contrastive_logits_temperature
_UpperCAmelCase : List[str] = num_negatives
_UpperCAmelCase : Tuple = codevector_dim
_UpperCAmelCase : str = proj_codevector_dim
_UpperCAmelCase : Tuple = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Tuple = ctc_loss_reduction
_UpperCAmelCase : Dict = ctc_zero_infinity
# adapter
_UpperCAmelCase : Optional[Any] = add_adapter
_UpperCAmelCase : Tuple = adapter_kernel_size
_UpperCAmelCase : Tuple = adapter_stride
_UpperCAmelCase : int = num_adapter_layers
_UpperCAmelCase : List[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase : Union[str, Any] = list(A )
_UpperCAmelCase : Optional[Any] = list(A )
_UpperCAmelCase : Tuple = list(A )
_UpperCAmelCase : Optional[Any] = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 263 |
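The property at the end of the config computes the input-samples-per-frame ratio of the convolutional feature extractor: the product of the conv strides. With the default WavLM strides this is 320, i.e. one hidden frame per 20 ms of 16 kHz audio:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the config's default strides
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
assert inputs_to_logits_ratio == 320  # 320 samples = 20 ms of 16 kHz audio per frame
print(inputs_to_logits_ratio)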
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A ) -> None:
_UpperCAmelCase : set[int] = vertices
_UpperCAmelCase : dict[EdgeT, int] = {
(min(A ), max(A )): weight for edge, weight in edges.items()
}
def __lowerCAmelCase ( self , A , A ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_UpperCAmelCase : List[Any] = weight
def __lowerCAmelCase ( self ) -> Graph:
_UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_UpperCAmelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Optional[int] = weight
subgraph.add_edge(A , A )
return subgraph
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 1 |
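A small worked example of the Prim construction above, reduced to a function: repeatedly take the lightest edge with exactly one endpoint in the tree (the `^` membership test mirrors the class's check), then report the weight saved versus keeping every edge, as the Euler-problem driver does. The 4-vertex graph is illustrative.

def prims_total(vertices, edges):
    """Total MST weight; edges maps (u, v) tuples to weights."""
    in_tree = {min(vertices)}
    total = 0
    while len(in_tree) < len(vertices):
        # lightest edge with exactly one endpoint inside the tree
        (u, v), weight = min(
            (item for item in edges.items() if (item[0][0] in in_tree) ^ (item[0][1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update((u, v))
        total += weight
    return total

edges = {(0, 1): 1, (0, 2): 4, (1, 2): 2, (1, 3): 6, (2, 3): 3}
assert prims_total({0, 1, 2, 3}, edges) == 6   # MST uses (0,1), (1,2), (2,3)
print("saving:", sum(edges.values()) - 6)      # 16 - 6 = 10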
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase_ = logging.get_logger(__name__)
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Tuple ):
"""simple docstring"""
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def lowercase__( __UpperCamelCase: np.ndarray ,__UpperCamelCase: Optional[str] ,__UpperCamelCase: Optional[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = to_pil_image(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pil_image.size
SCREAMING_SNAKE_CASE : Dict = pytesseract.image_to_data(__UpperCamelCase ,lang=__UpperCamelCase ,output_type='dict' ,config=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE : Any = [idx for idx, word in enumerate(__UpperCamelCase ) if not word.strip()]
SCREAMING_SNAKE_CASE : Optional[Any] = [word for idx, word in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE : Tuple = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE : int = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE : Union[str, Any] = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE : Union[str, Any] = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for x, y, w, h in zip(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Dict = [x, y, x + w, y + h]
actual_boxes.append(__UpperCamelCase )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE : int = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) )
assert len(__UpperCamelCase ) == len(__UpperCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[Any] = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BILINEAR, A = True, A = 1 / 255, A = True, A = None, A = None, A = True, A = None, A = "", **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(A )
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Tuple = resample
SCREAMING_SNAKE_CASE : Tuple = do_rescale
SCREAMING_SNAKE_CASE : List[str] = rescale_value
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE : List[str] = apply_ocr
SCREAMING_SNAKE_CASE : Any = ocr_lang
SCREAMING_SNAKE_CASE : str = tesseract_config
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BILINEAR, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE : Optional[Any] = (size['height'], size['width'])
return resize(A, size=A, resample=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
return rescale(A, scale=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A, A = None, **A, ):
'''simple docstring'''
return normalize(A, mean=A, std=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A = None, A = None, A=None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(A )
SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE : Any = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Union[str, Any] = [to_numpy_array(A ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self, 'pytesseract' )
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Tuple = []
for image in images:
SCREAMING_SNAKE_CASE : Optional[int] = apply_tesseract(A, A, A )
words_batch.append(A )
boxes_batch.append(A )
if do_resize:
SCREAMING_SNAKE_CASE : str = [self.resize(image=A, size=A, resample=A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Tuple = [self.rescale(image=A, scale=A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.normalize(image=A, mean=A, std=A ) for image in images]
SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(A, A ) for image in images]
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature(data={'pixel_values': images}, tensor_type=A )
if apply_ocr:
SCREAMING_SNAKE_CASE : Tuple = words_batch
SCREAMING_SNAKE_CASE : List[str] = boxes_batch
return data
| 355 |
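The core contract in the OCR path above is `normalize_box`: LayoutLM-style models expect bounding boxes rescaled to a 0-1000 grid regardless of the source image size. The same function standalone, with a hand-checked example (the sample box is illustrative):

def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# a box from (10, 20) to (110, 70) inside a 200x100 image
assert normalize_box([10, 20, 110, 70], 200, 100) == [50, 200, 550, 700]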
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 246 | 0 |
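The repeated try/except blocks above implement graceful degradation for optional backends: probe for the dependency once at import time, and raise an actionable error only when a feature that needs the backend is actually used. A minimal sketch of the pattern (the function name and message are illustrative):

try:
    import torch  # optional heavy backend
    _torch_available = True
except ImportError:
    _torch_available = False

def load_torch_model():
    if not _torch_available:
        raise ImportError("This feature requires PyTorch: pip install torch")
    return torch.nn.Identity()  # placeholder for a real model class

if _torch_available:
    print(type(load_torch_model()).__name__)  # Identity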
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a__ :
def __init__( self , _A , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = 1_3
__lowerCAmelCase = 7
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = 9_9
__lowerCAmelCase = 3_2
__lowerCAmelCase = 2
__lowerCAmelCase = 4
__lowerCAmelCase = 3_7
__lowerCAmelCase = "gelu"
__lowerCAmelCase = 0.1
__lowerCAmelCase = 0.1
__lowerCAmelCase = 5_1_2
__lowerCAmelCase = 1_6
__lowerCAmelCase = 2
__lowerCAmelCase = 0.02
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = None
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFEsmModel(config=_A )
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowerCAmelCase = model(_A )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A , _A , _A , ):
"""simple docstring"""
__lowerCAmelCase = True
__lowerCAmelCase = TFEsmModel(config=_A )
__lowerCAmelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
__lowerCAmelCase = model(_A )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(_A , encoder_hidden_states=_A )
# Also check the case where encoder outputs are not passed
__lowerCAmelCase = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFEsmForMaskedLM(config=_A )
__lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFEsmForTokenClassification(config=_A )
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_a : List[Any] = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Union[str, Any] = False
_a : List[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFEsmModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFEsmModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip("Protein models do not support embedding resizing." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("Protein models do not support embedding resizing." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__lowerCAmelCase = model.get_bias()
assert isinstance(_A , _A )
for k, v in name.items():
assert isinstance(_A , tf.Variable )
else:
__lowerCAmelCase = model.get_output_embeddings()
assert x is None
__lowerCAmelCase = model.get_bias()
assert name is None
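# For the masked-LM head the output projection is tied to the input embeddings, so
# get_output_embeddings() returns None and the only output-side parameters are the
# biases, which get_bias() exposes as a name -> tf.Variable mapping (hence the dict
# iteration in the MLM branch above).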
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase = model(_A )[0]
__lowerCAmelCase = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape ) , _A )
# compare the actual values for a slice.
__lowerCAmelCase = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
__lowerCAmelCase = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__lowerCAmelCase = model(_A )[0]
# compare the actual values for a slice.
__lowerCAmelCase = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
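# A minimal, standalone sketch (not part of the tests above) of the slice-comparison
# pattern both integration tests use: hard-code a small corner of the expected output
# and compare within a tolerance, which keeps the check fast and tolerant of small
# numerical drift across hardware.
import numpy

def logits_slice_close(output, expected_slice, atol=1E-2):
    # compare only the top-left 3x3 corner of the first batch element
    return numpy.allclose(output[:, :3, :3], expected_slice, atol=atol)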
| 92 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = StableUnCLIPPipeline
_a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a : Optional[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 3_2
__lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
__lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
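# The components above mirror the pipeline's three stages: the prior stack maps text
# to a CLIP image embedding, the image_normalizer / image_noising_scheduler noise
# that embedding to a chosen level, and the tokenizer / text_encoder / unet /
# scheduler / vae form the usual conditioned denoising loop.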
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
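# At the time this test was written torch.Generator(device="mps") was not supported,
# so the "mps" branch above falls back to seeding the global RNG with
# torch.manual_seed; elsewhere a device-local generator keeps the test deterministic
# without touching global state.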
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
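# A minimal sketch (assumes a CUDA device is available) of the peak-memory assertion
# pattern used above: clear the cache, reset the allocator's peak counters, run the
# workload, then read back the high-water mark in bytes.
import torch

def peak_memory_bytes(workload):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    workload()
    return torch.cuda.max_memory_allocated()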
| 92 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A_ :
def __init__(self :int , _UpperCamelCase :str , _UpperCamelCase :Union[str, Any]=13 , _UpperCamelCase :Any=10 , _UpperCamelCase :Dict=3 , _UpperCamelCase :Optional[int]=2 , _UpperCamelCase :int=2 , _UpperCamelCase :Any=2 , _UpperCamelCase :Union[str, Any]=True , _UpperCamelCase :Optional[int]=True , _UpperCamelCase :Dict=32 , _UpperCamelCase :List[Any]=5 , _UpperCamelCase :str=4 , _UpperCamelCase :Tuple=37 , _UpperCamelCase :Dict="gelu" , _UpperCamelCase :str=0.1 , _UpperCamelCase :Dict=0.1 , _UpperCamelCase :Tuple=10 , _UpperCamelCase :Optional[int]=0.0_2 , _UpperCamelCase :int=0.9 , _UpperCamelCase :List[Any]=None , )-> List[str]:
__A = parent
__A = batch_size
__A = image_size
__A = num_channels
__A = patch_size
__A = tubelet_size
__A = num_frames
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = type_sequence_label_size
__A = initializer_range
__A = mask_ratio
__A = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__A = (image_size // patch_size) ** 2
__A = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__A = int(mask_ratio * self.seq_length )
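# With the defaults above (image_size=10, patch_size=2, num_frames=2, tubelet_size=2,
# mask_ratio=0.9): num_patches_per_frame = (10 // 2) ** 2 = 25, seq_length =
# (2 // 2) * 25 = 25, and num_masks = int(0.9 * 25) = 22 masked tokens per video.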
def _lowerCAmelCase (self :Any )-> Tuple:
__A = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase (self :List[Any] )-> int:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[Any] , _UpperCamelCase :List[str] )-> Union[str, Any]:
__A = VideoMAEModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
__A = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase (self :Any , _UpperCamelCase :str , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :int )-> int:
__A = VideoMAEForPreTraining(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__A = torch.ones((self.num_masks,) )
__A = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__A = mask.expand(self.batch_size , -1 ).bool()
__A = model(_UpperCamelCase , _UpperCamelCase )
# model only returns predictions for masked patches
__A = mask.sum().item()
__A = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCAmelCase (self :Union[str, Any] )-> int:
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCAmelCase__ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowerCAmelCase (self :List[Any] )-> List[str]:
__A = VideoMAEModelTester(self )
__A = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :int , _UpperCamelCase :int , _UpperCamelCase :List[str]=False )-> List[Any]:
__A = copy.deepcopy(_UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__A = torch.ones((self.model_tester.num_masks,) )
__A = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__A = mask.expand(self.model_tester.batch_size , -1 ).bool()
__A = bool_masked_pos.to(_UpperCamelCase )
if return_labels:
if model_class in [
*get_values(_UpperCamelCase ),
]:
__A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def _lowerCAmelCase (self :Dict )-> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def _lowerCAmelCase (self :Any )-> Optional[int]:
pass
def _lowerCAmelCase (self :Tuple )-> int:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def _lowerCAmelCase (self :Optional[int] )-> Union[str, Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_UpperCamelCase )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowerCAmelCase (self :List[str] )-> Optional[int]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCAmelCase (self :str )-> Any:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
@slow
def _lowerCAmelCase (self :Any )-> Optional[Any]:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = VideoMAEModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _lowerCAmelCase (self :Dict )-> List[Any]:
if not self.has_attentions:
pass
else:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
for model_class in self.all_model_classes:
__A = self.model_tester.seq_length - self.model_tester.num_masks
__A = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__A = True
__A = False
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A = len(_UpperCamelCase )
# Check attention is always last and order is fine
__A = True
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
__A = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
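# The three passes above check, in order: attentions requested per call, attentions
# requested via the config (after deleting the per-call flag), and attentions
# returned alongside hidden states (hence out_len + 1 outputs), each time validating
# a per-layer shape of (num_attention_heads, seq_len, seq_len).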
def _lowerCAmelCase (self :int )-> Optional[Any]:
def check_hidden_states_output(_UpperCamelCase :str , _UpperCamelCase :Any , _UpperCamelCase :List[Any] ):
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.hidden_states
__A = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
__A = self.model_tester.seq_length - self.model_tester.num_masks
__A = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase (self :List[Any] )-> str:
pass
def _a ( ) -> List[str]:
'''simple docstring'''
__A = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__A = np.load(__A )
return list(__A )
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase (self :str )-> str:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase (self :Any )-> Dict:
__A = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
_UpperCamelCase )
__A = self.default_image_processor
__A = prepare_video()
__A = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
__A = model(**_UpperCamelCase )
# verify the logits
__A = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
__A = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def _lowerCAmelCase (self :List[str] )-> int:
__A = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(_UpperCamelCase )
__A = self.default_image_processor
__A = prepare_video()
__A = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# add boolean mask, indicating which patches to mask
__A = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
__A = torch.load(_UpperCamelCase )
# forward pass
with torch.no_grad():
__A = model(**_UpperCamelCase )
# verify the logits
__A = torch.Size([1, 1408, 1536] )
__A = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=_UpperCamelCase )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__A = torch.tensor([0.5_1_4_2] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__A = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=_UpperCamelCase ).to(
_UpperCamelCase )
with torch.no_grad():
__A = model(**_UpperCamelCase )
__A = torch.tensor([0.6_4_6_9] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1e-4 ) )
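# A standalone illustration (with hypothetical sizes) of the boolean-mask construction
# the pre-training tests above rely on: num_masks ones followed by zeros, repeated
# across the batch, so every video masks the same leading patches.
import torch

seq_length, num_masks, batch_size = 25, 22, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)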
| 250 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Dict = {'vocab_file': 'vocab.txt'}
snake_case__ : Dict = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
snake_case__ : Optional[int] = {
'openbmb/cpm-ant-10b': 1024,
}
def _a ( lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = collections.OrderedDict()
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
__A = reader.readlines()
for index, token in enumerate(lowerCamelCase ):
__A = token.rstrip('''\n''' )
__A = index
return vocab
class A_ ( _lowerCamelCase ):
def __init__(self :Any , _UpperCamelCase :Dict , _UpperCamelCase :Optional[int]="<unk>" , _UpperCamelCase :List[str]=200 )-> List[str]:
__A = vocab
__A = unk_token
__A = max_input_chars_per_word
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :List[Any] )-> str:
__A = list(_UpperCamelCase )
if len(_UpperCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
__A = 0
__A = []
while start < len(_UpperCamelCase ):
__A = len(_UpperCamelCase )
__A = None
while start < end:
__A = ''''''.join(chars[start:end] )
if substr in self.vocab:
__A = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_UpperCamelCase )
__A = end
return sub_tokens
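# A self-contained illustration (with a hypothetical vocab) of the greedy
# longest-match-first loop above: at each position try the longest remaining
# substring still in the vocab, emit it and advance; a position that matches
# nothing becomes a single unk token. (The real method additionally bails out
# to [unk] when the word exceeds max_input_chars_per_word.)
def greedy_longest_match(text, vocab, unk="<unk>"):
    start, tokens = 0, []
    while start < len(text):
        end = len(text)
        while start < end and text[start:end] not in vocab:
            end -= 1
        if start == end:  # no vocab entry starts at this position
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert greedy_longest_match("unhappy", {"un", "happy", "h"}) == ["un", "happy"]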
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = False
def __init__(self :str , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Any="<d>" , _UpperCamelCase :List[str]="</d>" , _UpperCamelCase :Dict="<s>" , _UpperCamelCase :Optional[Any]="</s>" , _UpperCamelCase :Optional[int]="<pad>" , _UpperCamelCase :List[str]="<unk>" , _UpperCamelCase :str="</n>" , _UpperCamelCase :Optional[int]="</_>" , _UpperCamelCase :Optional[Any]="left" , **_UpperCamelCase :Any , )-> Union[str, Any]:
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_UpperCamelCase , eod_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , unk_token=_UpperCamelCase , line_token=_UpperCamelCase , space_token=_UpperCamelCase , padding_side=_UpperCamelCase , **_UpperCamelCase , )
__A = bod_token
__A = eod_token
__A = load_vocab(_UpperCamelCase )
__A = self.encoder[space_token]
__A = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
__A = {v: k for k, v in self.encoder.items()}
__A = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowerCAmelCase (self :Union[str, Any] )-> Dict:
return self.encoder[self.bod_token]
@property
def _lowerCAmelCase (self :Optional[int] )-> Dict:
return self.encoder[self.eod_token]
@property
def _lowerCAmelCase (self :Any )-> List[Any]:
return self.encoder["\n"]
@property
def _lowerCAmelCase (self :List[str] )-> int:
return len(self.encoder )
def _lowerCAmelCase (self :List[str] )-> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :Dict )-> Union[str, Any]:
__A = []
for x in jieba.cut(_UpperCamelCase , cut_all=False ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
return output_tokens
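# jieba first segments the raw text into words (precise mode, cut_all=False), and each
# segment is then split into vocabulary subwords by the WordpieceTokenizer above.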
def _lowerCAmelCase (self :str , _UpperCamelCase :int , **_UpperCamelCase :List[str] )-> Tuple:
__A = [i for i in token_ids if i >= 0]
__A = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Optional[int] )-> List[str]:
return token in self.encoder
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :List[str] )-> str:
return "".join(_UpperCamelCase )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] )-> List[Any]:
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase (self :Any , _UpperCamelCase :Tuple )-> int:
return self.decoder.get(_UpperCamelCase , self.unk_token )
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :str , _UpperCamelCase :Optional[str] = None )-> Tuple[str]:
if os.path.isdir(_UpperCamelCase ):
__A = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__A = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__A = 0
if " " in self.encoder:
__A = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__A = self.encoder['''\n''']
del self.encoder["\n"]
__A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
__A = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :List[int] , _UpperCamelCase :List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
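# CPM-Ant prefixes each sequence with the BOS token instead of appending an EOS: a
# single sequence becomes [bos] + ids, and a pair becomes [bos] + ids_a + [bos] +
# ids_b; the special-tokens mask below mirrors that as [1] + zeros (+ [1] + zeros).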
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None , _UpperCamelCase :bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase ))
return [1] + ([0] * len(_UpperCamelCase ))
| 250 | 1 |