| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE_( *lowercase , **lowercase ) -> int:
pass
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> str:
lowerCamelCase_ = DepthEstimationPipeline(model=lowercase , image_processor=lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , lowercase )
import datasets
lowerCamelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCamelCase_ = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , lowercase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = "Intel/dpt-large"
lowerCamelCase_ = pipeline("depth-estimation" , model=lowercase )
lowerCamelCase_ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
lowerCamelCase_ = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
# This is highly irregular to have no small tests.
self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT" )
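# Hedged usage sketch (not part of the test suite above; assumes network
# access and the Intel/dpt-large checkpoint referenced in the slow test):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["predicted_depth"]  # torch.Tensor of per-pixel depth values
#     outputs["depth"]            # PIL.Image visualization of the same map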
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
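# Quick extra sanity check of the SCC routine (using the readable names
# tarjan/create_graph from the lines above): a two-node cycle is one component.
assert [[1, 0]] == tarjan(create_graph(2, [(0, 1), (1, 0)]))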
| 19 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = "efficientformer"
def __init__( self : int , snake_case : List[int] = [3, 2, 6, 4] , snake_case : List[int] = [48, 96, 224, 448] , snake_case : List[bool] = [True, True, True, True] , snake_case : int = 448 , snake_case : int = 32 , snake_case : int = 4 , snake_case : int = 7 , snake_case : int = 5 , snake_case : int = 8 , snake_case : int = 4 , snake_case : float = 0.0 , snake_case : int = 16 , snake_case : int = 3 , snake_case : int = 3 , snake_case : int = 3 , snake_case : int = 2 , snake_case : int = 1 , snake_case : float = 0.0 , snake_case : int = 1 , snake_case : bool = True , snake_case : bool = True , snake_case : float = 1E-5 , snake_case : str = "gelu" , snake_case : float = 0.02 , snake_case : float = 1E-12 , snake_case : int = 224 , snake_case : float = 1E-05 , **snake_case : Optional[int] , ) -> None:
super().__init__(**__A )
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = hidden_sizes
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Optional[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = patch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : List[Any] = depths
__UpperCAmelCase : Any = mlp_expansion_ratio
__UpperCAmelCase : Dict = downsamples
__UpperCAmelCase : List[Any] = dim
__UpperCAmelCase : int = key_dim
__UpperCAmelCase : Any = attention_ratio
__UpperCAmelCase : Dict = resolution
__UpperCAmelCase : Any = pool_size
__UpperCAmelCase : Optional[int] = downsample_patch_size
__UpperCAmelCase : Dict = downsample_stride
__UpperCAmelCase : Tuple = downsample_pad
__UpperCAmelCase : Any = drop_path_rate
__UpperCAmelCase : Tuple = num_meta3d_blocks
__UpperCAmelCase : Union[str, Any] = distillation
__UpperCAmelCase : Optional[int] = use_layer_scale
__UpperCAmelCase : Tuple = layer_scale_init_value
__UpperCAmelCase : List[str] = image_size
__UpperCAmelCase : Tuple = batch_norm_eps
| 359 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase :Tuple = "Muhammad Umer Farooq"
__UpperCAmelCase :Tuple = "MIT"
__UpperCAmelCase :Union[str, Any] = "1.0.0"
__UpperCAmelCase :Optional[int] = "Muhammad Umer Farooq"
__UpperCAmelCase :Optional[Any] = "contact@muhammadumerfarooq.me"
__UpperCAmelCase :Any = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a ( _a ):
"""simple docstring"""
def __init__( self : Tuple , snake_case : str ) -> None:
super().__init__()
__UpperCAmelCase : list[str] = []
__UpperCAmelCase : Optional[int] = domain
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : str , snake_case : list[tuple[str, str | None]] ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__UpperCAmelCase : Optional[Any] = parse.urljoin(self.domain , snake_case )
self.urls.append(snake_case )
def _a ( _lowercase : str ):
'''simple docstring'''
return ".".join(get_sub_domain_name(_lowercase ).split('''.''' )[-2:] )
def _a ( _lowercase : str ):
'''simple docstring'''
return parse.urlparse(_lowercase ).netloc
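# Expected behaviour of the two URL helpers above (readable names taken from
# the call sites): get_sub_domain_name("https://sub.example.com/x") returns
# "sub.example.com", and get_domain_name keeps only the last two labels,
# giving "example.com".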
def _a ( _lowercase : str = "https://github.com" ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = get_domain_name(_lowercase )
# Initialize the parser
__UpperCAmelCase : Dict = Parser(_lowercase )
try:
# Open URL
__UpperCAmelCase : Dict = requests.get(_lowercase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__UpperCAmelCase : str = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__UpperCAmelCase : Tuple = requests.get(_lowercase )
# Get the valid email.
__UpperCAmelCase : Dict = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_lowercase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_lowercase )
if __name__ == "__main__":
__UpperCAmelCase :List[str] = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails))) | 240 | 0 |
from math import factorial
snake_case_ = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase__ ( snake_case_ : int ) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case_ ) )
def lowerCamelCase__ ( ) -> int:
__snake_case = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case_ ) if sum_of_digit_factorial(snake_case_ ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
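# Quick check of the digit-factorial helper above (readable name taken from
# the call site): 145 is a fixed point, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert sum_of_digit_factorial(145) == 145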
| 24 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( snake_case_ ):
lowercase = 'megatron-bert'
def __init__( self : List[str] , snake_case : Tuple=2_9_0_5_6 , snake_case : Dict=1_0_2_4 , snake_case : Dict=2_4 , snake_case : Union[str, Any]=1_6 , snake_case : Optional[int]=4_0_9_6 , snake_case : Optional[int]="gelu" , snake_case : Any=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[int]=5_1_2 , snake_case : List[Any]=2 , snake_case : Tuple=0.02 , snake_case : Optional[Any]=1e-12 , snake_case : str=0 , snake_case : Optional[int]="absolute" , snake_case : Union[str, Any]=True , **snake_case : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case , **snake_case )
UpperCamelCase_ : Optional[Any] = vocab_size
UpperCamelCase_ : Any = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : List[Any] = num_attention_heads
UpperCamelCase_ : str = hidden_act
UpperCamelCase_ : List[str] = intermediate_size
UpperCamelCase_ : List[Any] = hidden_dropout_prob
UpperCamelCase_ : Any = attention_probs_dropout_prob
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Dict = type_vocab_size
UpperCamelCase_ : Optional[int] = initializer_range
UpperCamelCase_ : Optional[Any] = layer_norm_eps
UpperCamelCase_ : Dict = position_embedding_type
UpperCamelCase_ : List[str] = use_cache
| 175 | 0 |
def _a ( UpperCAmelCase = 3 , UpperCAmelCase = 7 , UpperCAmelCase = 1000000 ) -> int:
"""simple docstring"""
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = 1
for current_denominator in range(1 , limit + 1 ):
lowerCamelCase__ : Tuple = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
lowerCamelCase__ : int = current_numerator
lowerCamelCase__ : List[str] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
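# Worked example on a small limit: among fractions with denominator <= 8, the
# closest one below 3/7 is 2/5, so the numerator returned is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2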
| 368 |
from math import factorial
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , A : Dict , A : Any ) ->Optional[Any]:
lowerCamelCase__ : Tuple = real
if isinstance(A , A ):
lowerCamelCase__ : Optional[int] = [1] * rank
else:
lowerCamelCase__ : List[Any] = rank
def __repr__( self : Tuple ) ->str:
return (
F"{self.real}+"
F"{'+'.join(str(A )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
lowerCamelCase__ : Tuple = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , A )
def __add__( self : Union[str, Any] , A : int ) ->str:
if not isinstance(A , A ):
return Dual(self.real + other , self.duals )
lowerCamelCase__ : int = self.duals.copy()
lowerCamelCase__ : int = other.duals.copy()
if len(A ) > len(A ):
o_dual.extend([1] * (len(A ) - len(A )) )
elif len(A ) < len(A ):
s_dual.extend([1] * (len(A ) - len(A )) )
lowerCamelCase__ : Optional[Any] = []
for i in range(len(A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , A )
_UpperCAmelCase : List[Any] = __add__
def __sub__( self : Any , A : Dict ) ->int:
return self + other * -1
def __mul__( self : Optional[Any] , A : List[Any] ) ->Union[str, Any]:
if not isinstance(A , A ):
lowerCamelCase__ : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , A )
lowerCamelCase__ : Tuple = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , A )
_UpperCAmelCase : Optional[Any] = __mul__
def __truediv__( self : int , A : List[Any] ) ->Dict:
if not isinstance(A , A ):
lowerCamelCase__ : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , A )
raise ValueError
def __floordiv__( self : Dict , A : Union[str, Any] ) ->Union[str, Any]:
if not isinstance(A , A ):
lowerCamelCase__ : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , A )
raise ValueError
def __pow__( self : Any , A : List[Any] ) ->Tuple:
if n < 0 or isinstance(A , A ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase__ : Union[str, Any] = self
for _ in range(n - 1 ):
x *= self
return x
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
if not callable(UpperCAmelCase ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(UpperCAmelCase , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError('''differentiate() requires an int as input for order''' )
lowerCamelCase__ : List[str] = Dual(UpperCAmelCase , 1 )
lowerCamelCase__ : Any = func(UpperCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
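# Cross-check of the forward-mode AD result printed above: for f(y) = y**6,
# the second derivative at y = 9 is 30 * 9**4 = 196830.
assert differentiate(f, 9, 2) == 30 * 9**4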
| 265 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A__ ( unittest.TestCase ):
def A ( self : str ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_a , _a )
def A ( self : List[str] , **_a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def A ( self : Optional[Any] , **_a : str ) -> List[str]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def A ( self : List[str] , **_a : Union[str, Any] ) -> str:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a )
def A ( self : Tuple ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def A ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='np' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : List[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def A ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =AlignProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
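# Hedged end-to-end sketch (not part of the test; assumes network access and
# the kakaobrain/align-base checkpoint from the ALIGN model card):
#
#     from transformers import AlignModel, AlignProcessor
#     processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#     model = AlignModel.from_pretrained("kakaobrain/align-base")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     outputs = model(**inputs)  # includes image-text similarity logits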
| 47 |
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers")
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = find_backend(' if not is_torch_available():')
self.assertEqual(A , 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):')
self.assertEqual(A , 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_UpperCAmelCase = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
self.assertEqual(A , 'torch_and_transformers_and_onnx')
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , A)
self.assertIn('torch_and_transformers' , A)
self.assertIn('flax_and_transformers' , A)
self.assertIn('torch_and_transformers_and_onnx' , A)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(A , '\nCONSTANT = None\n')
_UpperCAmelCase = create_dummy_object('function' , '\'torch\'')
self.assertEqual(
A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
_UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(A , A)
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , A)
| 339 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def A__ ( SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> int:
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__)
@dataclass
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = field(
metadata={"""help""": """The csv file to plot."""} , )
lowerCAmelCase__ = field(
default=__lowerCamelCase , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
lowerCAmelCase__ = field(
default=__lowerCamelCase , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
lowerCAmelCase__ = field(
default=__lowerCamelCase , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
lowerCAmelCase__ = field(
default=__lowerCamelCase , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
lowerCAmelCase__ = field(
default=__lowerCamelCase , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
lowerCAmelCase__ = list_field(
default=__lowerCamelCase , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
try:
int(SCREAMING_SNAKE_CASE__)
return True
except ValueError:
return False
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
try:
float(SCREAMING_SNAKE_CASE__)
return True
except ValueError:
return False
class __snake_case :
'''simple docstring'''
def __init__( self : Tuple , A : Union[str, Any] ):
__snake_case: Any = args
__snake_case: Any = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
__snake_case: Any = csv.DictReader(A )
for row in reader:
__snake_case: Union[str, Any] = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
__snake_case: List[Any] = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
__snake_case: List[str] = float(row["""result"""] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = plt.subplots()
__snake_case: str = """Time usage""" if self.args.is_time else """Memory usage"""
__snake_case: Optional[Any] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__snake_case: List[Any] = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
__snake_case: List[Any] = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
__snake_case: List[str] = self.result_dict[model_name]["""result"""]
((__snake_case) , (__snake_case)): str = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__snake_case: str = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__snake_case: Optional[Any] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=A , )
else:
__snake_case: int = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((__snake_case) , (__snake_case)): Optional[int] = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
__snake_case: Union[str, Any] = np.asarray(A , A )[: len(A )]
plt.scatter(
A , A , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(A , A , """--""" )
title_str += f''' {label_model_name} vs.'''
__snake_case: Any = title_str[:-4]
__snake_case: List[Any] = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(A )
plt.xlabel(A )
plt.ylabel(A )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def A__ ( ) -> Dict:
__snake_case: Union[str, Any] = HfArgumentParser(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = parser.parse_args_into_dataclasses()[0]
__snake_case: List[Any] = Plot(args=SCREAMING_SNAKE_CASE__)
plot.plot()
if __name__ == "__main__":
main()
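# The reader above expects one row per measurement with these columns
# (values below are hypothetical, for illustration only):
#
#     model,batch_size,sequence_length,result
#     bert-base-uncased,8,128,0.012
#     bert-base-uncased,8,512,0.044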
| 293 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :int ):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
lowerCamelCase__ : Dict =img
lowerCamelCase__ : List[Any] =img.shape[1]
lowerCamelCase__ : Optional[Any] =img.shape[0]
lowerCamelCase__ : List[Any] =dst_width
lowerCamelCase__ : Any =dst_height
lowerCamelCase__ : Union[str, Any] =self.src_w / self.dst_w
lowerCamelCase__ : str =self.src_h / self.dst_h
lowerCamelCase__ : Any =(
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
)
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowerCamelCase__ : Optional[int] =self.img[self.get_y(_snake_case )][self.get_x(_snake_case )]
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :int ):
"""simple docstring"""
return int(self.ratio_x * x )
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :int ):
"""simple docstring"""
return int(self.ratio_y * y )
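# Worked mapping example for the two accessors above: downscaling 800x600 to
# 400x300 gives ratio_x = ratio_y = 2.0, so the output pixel at row i=20,
# column j=10 samples source row int(2.0 * 20) = 40, column int(2.0 * 10) = 20.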
if __name__ == "__main__":
lowerCAmelCase , lowerCAmelCase = 8_00, 6_00
lowerCAmelCase = imread("""image_data/lena.jpg""", 1)
lowerCAmelCase = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 126 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
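# Example invocation (hypothetical script name; the flags come from the
# argparse definitions above):
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub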
| 16 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[Any] = "luke"
def __init__( self , A_=5_0267 , A_=50_0000 , A_=768 , A_=256 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.0_2 , A_=1e-12 , A_=True , A_=None , A_=1 , A_=0 , A_=2 , **A_ , ) -> Optional[Any]:
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
lowerCAmelCase = vocab_size
lowerCAmelCase = entity_vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = entity_emb_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_entity_aware_attention
lowerCAmelCase = classifier_dropout
| 187 |
'''simple docstring'''
import cmath
import math
def _snake_case ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> complex:
"""simple docstring"""
lowerCAmelCase = math.radians(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = math.radians(_SCREAMING_SNAKE_CASE )
# Convert voltage and current to rectangular form
lowerCAmelCase = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Calculate apparent power
return voltage_rect * current_rect
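# Worked example for the function above: 100 V at 0 degrees with 5 A at 90
# degrees gives a purely reactive apparent power of (approximately) 500j
# volt-amperes, since cmath.rect(100, 0) * cmath.rect(5, pi/2) == 500j up to
# floating-point rounding.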
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 1 |
from itertools import product
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> list[int]:
'''simple docstring'''
UpperCamelCase = sides_number
UpperCamelCase = max_face_number * dice_number
UpperCamelCase = [0] * (max_total + 1)
UpperCamelCase = 1
UpperCamelCase = range(UpperCamelCase_ , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase_ , repeat=UpperCamelCase_ ):
UpperCamelCase = sum(UpperCamelCase_ )
totals_frequencies[total] += 1
return totals_frequencies
def lowercase( ) -> float:
'''simple docstring'''
UpperCamelCase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
UpperCamelCase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
UpperCamelCase = 0
UpperCamelCase = 9
UpperCamelCase = 4 * 9
UpperCamelCase = 6
for peter_total in range(UpperCamelCase_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
UpperCamelCase = (4**9) * (6**6)
UpperCamelCase = peter_wins_count / total_games_number
UpperCamelCase = round(UpperCamelCase_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
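# Sanity check of the frequency helper above: a single 4-sided die produces
# each total 1..4 exactly once (index 0 is the unused total of zero).
assert total_frequency_distribution(sides_number=4, dice_number=1) == [0, 1, 1, 1, 1]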
| 343 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_SCREAMING_SNAKE_CASE = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def lowercase( ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) )
UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase( UpperCamelCase_ = 100 ) -> List[Any]:
'''simple docstring'''
return (generate_random_hand() for _ in range(UpperCamelCase_ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = PokerHand(UpperCamelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
def lowercase( ) -> Dict:
'''simple docstring'''
UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(UpperCamelCase_ )
UpperCamelCase = chain(sorted(UpperCamelCase_ ) )
for index, hand in enumerate(UpperCamelCase_ ):
assert hand == poker_hands[index]
def lowercase( ) -> Union[str, Any]:
'''simple docstring'''
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=UpperCamelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase( ) -> str:
'''simple docstring'''
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase( ) -> int:
'''simple docstring'''
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" )
with open(UpperCamelCase_ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ )
UpperCamelCase = player.compare_with(UpperCamelCase_ )
if output == "Win":
answer += 1
assert answer == 376
| 343 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a( A : Namespace ) -> str:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowercase: Optional[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class _lowercase ( a_ ):
"""simple docstring"""
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ ):
"""simple docstring"""
a = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=lowerCamelCase_ , required=lowerCamelCase_ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=lowerCamelCase_ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , ):
"""simple docstring"""
a = logging.get_logger("transformers-cli/converting" )
self._logger.info(F'''Loading model {model_type}''' )
a = model_type
a = tf_checkpoint
a = pytorch_dump_output
a = config
a = finetuning_task_name
def UpperCamelCase_ (self ):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
a = self._tf_checkpoint
a = ""
else:
a = self._tf_checkpoint
a = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase_ , self._config , self._pytorch_dump_output , lowerCamelCase_ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]"
            )
| 350 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
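# A tiny SentencePiece model checked into the test fixtures; it keeps these tests fast and offline.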
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
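        # XLMProphetNet reserves the first `fairseq_offset` ids for fairseq-style special tokens, hence the shifted expectations.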
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
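        # "9" and "é" round-trip to "[UNK]" above because they are outside the tiny fixture vocabulary.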
@cached_property
    def big_tokenizer(self):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e"
        )
| 71 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
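        # LiLT expects boxes as (x0, y0, x1, y1); swap any inverted coordinates so x0 <= x1 and y0 <= y1.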
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
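    # The generic pipeline tests feed text-only inputs, while LiLT also needs `bbox` layout inputs, so skip them all.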
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 103 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
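# Exercises `no_sync`/`accumulate`: DDP gradients must only synchronize on the step that closes an accumulation window.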
def check_model_parameters(model_a, model_b, did_step, iteration):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    """simple docstring"""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
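# Builds a tiny regression model twice: one copy is stepped by hand as ground truth, the other goes through `accelerator.prepare`.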
def get_training_setup(accelerator, sched=False):
    """simple docstring"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """simple docstring"""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """simple docstring"""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 94 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
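# Minimal usage sketch (assumed, not part of the original file): the text-stream defaults above
# mirror a RoBERTa-base layout, while `channel_shrink_ratio` narrows the layout stream and
# `max_2d_position_embeddings` bounds the normalized bounding-box coordinates.
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1_024)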
| 367 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
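# Map LAVIS parameter names onto the Hugging Face BLIP-2 module layout (vision encoder + Q-Former).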
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
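# Note: the HF vision encoder uses a fused qkv projection, so the separate LAVIS q/v biases are
# packed above with zeros in the k slot (the original attention has no k bias).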
def get_blip2_config(model_name, eos_token_id):
    '''simple docstring'''
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
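    # `image_guidance_scale` above is specific to InstructPix2Pix: it balances faithfulness to the
    # input image against following the edit prompt.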
    def test_stable_diffusion_pix2pix_default_case(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_latents_input(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]
        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """simple docstring"""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        """simple docstring"""
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 219 |
'''simple docstring'''
def triangle_number_generator():
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
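# Project Euler 12: find the first triangle number T(n) = n * (n + 1) / 2 with more than 500 divisors.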
def count_divisors(n):
    divisors_count = 1
    i = 2
while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
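# The loop above uses d(n) = prod(e_i + 1) for the factorization n = prod(p_i ** e_i),
# so the divisors are counted without ever being enumerated.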
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 89 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def A_ ( self : str ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCamelCase__ : Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase__ : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase__ : Optional[Any] = {"""unk_token""": """<unk>"""}
lowerCamelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def A_ ( self : Optional[Any] , **UpperCAmelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def A_ ( self : str , **UpperCAmelCase : int ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def A_ ( self : List[Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
lowerCamelCase__ : List[str] = """lower newer"""
lowerCamelCase__ : List[str] = """lower newer"""
return input_text, output_text
def A_ ( self : Tuple ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ : Dict = """lower newer"""
lowerCamelCase__ : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCamelCase__ : Tuple = tokenizer.tokenize(__A ) # , add_prefix_space=True)
self.assertListEqual(__A , __A )
lowerCamelCase__ : int = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def A_ ( self : Optional[Any] ) -> List[str]:
lowerCamelCase__ : List[Any] = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
lowerCamelCase__ : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=__A )
lowerCamelCase__ : List[str] = tokenizer.encode('multi-sequence build' , add_special_tokens=__A )
lowerCamelCase__ : List[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=__A , add_prefix_space=__A )
lowerCamelCase__ : Dict = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__A , add_prefix_space=__A )
lowerCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(__A )
lowerCamelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : Any = """Encode this sequence."""
lowerCamelCase__ : Any = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
lowerCamelCase__ : List[Any] = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowerCamelCase__ : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
lowerCamelCase__ : Dict = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowerCamelCase__ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
lowerCamelCase__ : List[Any] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCamelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
lowerCamelCase__ : Dict = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
lowerCamelCase__ : Any = tokenizer.convert_tokens_to_ids(__A )
lowerCamelCase__ : Tuple = """Encode <mask> sequence"""
lowerCamelCase__ : Optional[Any] = """Encode <mask>sequence"""
lowerCamelCase__ : str = tokenizer.encode(__A )
lowerCamelCase__ : Dict = encoded.index(__A )
lowerCamelCase__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
lowerCamelCase__ : Dict = tokenizer.encode(__A )
lowerCamelCase__ : List[str] = encoded.index(__A )
lowerCamelCase__ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def A_ ( self : str ) -> List[str]:
pass
def A_ ( self : Any ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCamelCase__ : Union[str, Any] = """A, <mask> AllenNLP sentence."""
lowerCamelCase__ : str = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
lowerCamelCase__ : Optional[int] = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase__ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def A_ ( self : str ) -> Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __A )
self.assertEqual(post_processor_state['add_prefix_space'] , __A )
self.assertEqual(post_processor_state['trim_offsets'] , __A )
def A_ ( self : Optional[Any] ) -> int:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase__ : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ : List[str] = F"""{text_of_1_token} {text_of_1_token}"""
lowerCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : Any = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : List[str] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : List[Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : Dict = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : str = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
lowerCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCamelCase__ : Tuple = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
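The four `from_pretrained` configurations in the last test boil down to simple offset arithmetic on a repeated token; a pure-Python sketch of the (start, end) pairs being asserted (the helper name is ours):

```python
def expected_offsets(token_len: int, trim_offsets: bool):
    """Offsets for 'tok tok': trimming excludes the separating space from token 2."""
    first = (0, token_len)
    if trim_offsets:
        second = (token_len + 1, token_len + 1 + token_len)
    else:
        second = (token_len, token_len + 1 + token_len)
    return first, second

assert expected_offsets(5, True) == ((0, 5), (6, 11))
assert expected_offsets(5, False) == ((0, 5), (5, 11))
```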
| 351 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the list has a constant difference between consecutive terms."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the values in the series."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
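Quick usage of the two helpers as a sanity check:

```python
series = [2, 4, 6, 8]
assert is_arithmetic_series(series)     # constant difference of 2
assert arithmetic_mean(series) == 5.0   # (2 + 4 + 6 + 8) / 4
```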
| 45 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def UpperCAmelCase_ (__a : Accelerator , __a : DatasetDict , __a : List[int] , __a : List[int] , __a : int = 1_6 ):
"""simple docstring"""
_a : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
_a : str = DatasetDict(
{
'train': dataset['train'].select(__a ),
'validation': dataset['train'].select(__a ),
'test': dataset['validation'],
} )
def tokenize_function(__a : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : List[str] = datasets.map(
__a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__a : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : Tuple = 1_6
elif accelerator.mixed_precision != "no":
_a : List[Any] = 8
else:
_a : List[Any] = None
return tokenizer.pad(
__a , padding='longest' , max_length=__a , pad_to_multiple_of=__a , return_tensors='pt' , )
# Instantiate dataloaders.
_a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] ):
"""simple docstring"""
_a : Dict = []
# Download the dataset
_a : Tuple = load_dataset('glue' , 'mrpc' )
# Create our splits
_a : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_a : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[Any] = config['lr']
_a : Optional[int] = int(config['num_epochs'] )
_a : Dict = int(config['seed'] )
_a : Dict = int(config['batch_size'] )
_a : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_a : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Any = batch_size // MAX_GPU_BATCH_SIZE
_a : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__a )
# New Code #
# Create our folds:
_a : int = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
_a : Any = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__a ):
_a, _a, _a : Optional[Any] = get_fold_dataloaders(
__a , __a , __a , __a , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
_a : List[Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a, _a, _a, _a, _a : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Dict = model(**__a )
_a : int = outputs.loss
_a : Any = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Union[str, Any] = model(**__a )
_a : Tuple = outputs.logits.argmax(dim=-1 )
_a, _a : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__a , references=__a , )
_a : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __a )
# New Code #
# We also run predictions on the test set at the very end
_a : Any = []
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Tuple = model(**__a )
_a : Dict = outputs.logits
_a, _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__a , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_a : Dict = torch.cat(__a , dim=0 )
_a : Any = torch.stack(__a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_a : str = metric.compute(predictions=__a , references=__a )
accelerator.print('Average test metrics from all folds:' , __a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__a , default=__a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=__a , default=3 , help='The number of splits to perform across the dataset' )
_a : Any = parser.parse_args()
_a : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(__a , __a )
if __name__ == "__main__":
main()
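The fold-averaging at the end of `training_function` is ordinary soft voting over per-fold logits; the same step in isolation (shapes are illustrative):

```python
import torch

def soft_vote(fold_logits: list) -> torch.Tensor:
    """Average logits of shape (num_samples, num_labels) across folds, then argmax."""
    stacked = torch.stack(fold_logits, dim=0)   # (num_folds, N, C)
    return stacked.mean(dim=0).argmax(dim=-1)   # (N,)

preds = soft_vote([torch.randn(8, 2) for _ in range(3)])
assert preds.shape == (8,)
```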
| 271 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase = HUGGINGFACE_HUB_CACHE
__lowerCAmelCase = """config.json"""
__lowerCAmelCase = """diffusion_pytorch_model.bin"""
__lowerCAmelCase = """diffusion_flax_model.msgpack"""
__lowerCAmelCase = """model.onnx"""
__lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
__lowerCAmelCase = """weights.pb"""
__lowerCAmelCase = """https://huggingface.co"""
__lowerCAmelCase = default_cache_path
__lowerCAmelCase = """diffusers_modules"""
__lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
__lowerCAmelCase = ["""fp16""", """non-ema"""]
__lowerCAmelCase = """.self_attn"""
| 271 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
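The unigram behavior these tests assert (out-of-vocabulary pieces mapping to `<unk>`, id 0) can be reproduced with sentencepiece directly. A sketch assuming the same 1000-piece fixture model resolves from the tests directory:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="fixtures/test_sentencepiece.model")
ids = sp.encode("I was born in 92000, and this is falsé.")
unk_id = sp.piece_to_id("<unk>")  # 0 in this fixture
# The digit '9' and the accented 'é' fall outside the vocab and map to <unk>:
assert unk_id in ids
```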
| 26 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
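Outside the test harness the processor is driven the same way; a hedged sketch of the combined-key contract verified above (`processor` and `image` are assumed to be built as in `setUp`):

```python
def check_processor_keys(processor, image) -> None:
    """Run one prompt + image through the processor and verify the merged keys."""
    inputs = processor(text="Describe the image.", images=image, return_tensors="pt")
    expected = {"input_ids", "attention_mask",
                "qformer_input_ids", "qformer_attention_mask", "pixel_values"}
    assert expected.issubset(inputs.keys())
```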
| 26 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "roc_bert"
def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__="absolute", lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=768, lowerCAmelCase__=910, lowerCAmelCase__=512, lowerCAmelCase__=2_4858, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = enable_pronunciation
snake_case_ = enable_shape
snake_case_ = pronunciation_embed_dim
snake_case_ = pronunciation_vocab_size
snake_case_ = shape_embed_dim
snake_case_ = shape_vocab_size
snake_case_ = concat_input
snake_case_ = position_embedding_type
snake_case_ = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
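Instantiating the config follows the usual `PretrainedConfig` pattern; a short sketch using the library's actual class name (`RoCBertConfig`) rather than the obfuscated one above:

```python
from transformers import RoCBertConfig

# Override a few fields; everything else keeps the defaults shown above.
config = RoCBertConfig(hidden_size=512, num_hidden_layers=6, enable_pronunciation=False)
assert config.model_type == "roc_bert"
assert config.vocab_size == 30522
```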
| 69 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Any ,__snake_case :List[Any] ) -> Union[str, Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
a__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__snake_case )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = 'sshleifer/tiny-gpt2'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = 'sgugger/tiny-distilbert-classification'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,only_pretrain_model=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = 'sshleifer/tiny-gpt2'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,torchscript=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def lowerCamelCase__( self :int ) -> str:
a__ = 'sshleifer/tiny-gpt2'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,fpaa=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = 'sshleifer/tiny-gpt2'
a__ = AutoConfig.from_pretrained(__snake_case )
# set architectures equal to `None`
a__ = None
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Dict ) -> int:
a__ = 'sshleifer/tiny-gpt2'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def lowerCamelCase__( self :int ) -> List[str]:
a__ = 'sshleifer/tiny-gpt2'
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=__snake_case ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = 'sshleifer/tiny-gpt2'
a__ = AutoConfig.from_pretrained(__snake_case )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = 'sshleifer/tinier_bart'
a__ = AutoConfig.from_pretrained(__snake_case )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Tuple ) -> Dict:
a__ = 'sshleifer/tiny-gpt2'
a__ = AutoConfig.from_pretrained(__snake_case )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = 'sshleifer/tinier_bart'
a__ = AutoConfig.from_pretrained(__snake_case )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
a__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,save_to_csv=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__snake_case ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(__snake_case ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(__snake_case ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(__snake_case ,'train_time.csv' ) ,env_info_csv_file=os.path.join(__snake_case ,'env.csv' ) ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
benchmark.run()
self.assertTrue(Path(os.path.join(__snake_case ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__snake_case ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__snake_case ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__snake_case ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__snake_case ,'env.csv' ) ).exists() )
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
a__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__snake_case :List[str] ):
self.assertTrue(hasattr(__snake_case ,'sequential' ) )
self.assertTrue(hasattr(__snake_case ,'cumulative' ) )
self.assertTrue(hasattr(__snake_case ,'current' ) )
self.assertTrue(hasattr(__snake_case ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=__snake_case ,inference=__snake_case ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__snake_case ,'log.txt' ) ,log_print=__snake_case ,trace_memory_line_by_line=__snake_case ,multi_process=__snake_case ,)
a__ = PyTorchBenchmark(__snake_case )
a__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__snake_case ,'log.txt' ) ).exists() )
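A standalone run of the benchmark entry point these tests exercise, with the same tiny model and sizes (note that `PyTorchBenchmark` is a legacy utility in recent transformers releases):

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result, results.memory_inference_result)
```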
| 240 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase ( __lowerCamelCase : Dict ):
return EnvironmentCommand()
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase ( A_ ):
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : ArgumentParser ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = parser.add_parser("env" )
download_parser.set_defaults(func=snake_case__ )
download_parser.add_argument(
"--accelerate-config_file" , default=snake_case__ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=snake_case__ )
def __init__(self : List[Any] , snake_case__ : Any , *snake_case__ : Tuple ) -> None:
'''simple docstring'''
snake_case : str = accelerate_config_file
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : List[str] = "not installed"
if is_safetensors_available():
import safetensors
snake_case : Union[str, Any] = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
snake_case : Union[str, Any] = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
snake_case : Tuple = "not installed"
snake_case : Dict = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
snake_case : Union[str, Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(snake_case__ ):
snake_case : Dict = load_config_from_file(self._accelerate_config_file ).to_dict()
snake_case : Any = (
"\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(snake_case__ , snake_case__ )
else f"""\t{accelerate_config}"""
)
snake_case : Dict = "not installed"
snake_case : List[str] = "NA"
if is_torch_available():
import torch
snake_case : List[str] = torch.__version__
snake_case : Union[str, Any] = torch.cuda.is_available()
snake_case : Optional[int] = "not installed"
snake_case : Any = "NA"
if is_tf_available():
import tensorflow as tf
snake_case : Union[str, Any] = tf.__version__
try:
# deprecated in v2.1
snake_case : str = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case : Tuple = bool(tf.config.list_physical_devices("GPU" ) )
snake_case : Any = "not installed"
snake_case : List[str] = "not installed"
snake_case : Union[str, Any] = "not installed"
snake_case : List[str] = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
snake_case : Union[str, Any] = flax.__version__
snake_case : Tuple = jax.__version__
snake_case : int = jaxlib.__version__
snake_case : Optional[Any] = jax.lib.xla_bridge.get_backend().platform
snake_case : int = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f"""{safetensors_version}""",
"Accelerate version": f"""{accelerate_version}""",
"Accelerate config": f"""{accelerate_config_str}""",
"PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": f"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": f"""{flax_version} ({jax_backend})""",
"Jax version": f"""{jax_version}""",
"JaxLib version": f"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(snake_case__ ) )
return info
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : int ) -> List[Any]:
'''simple docstring'''
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 10 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Any , snake_case__ : Tuple=99 , snake_case__ : Tuple=13 , snake_case__ : int=16 , snake_case__ : Tuple=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True , snake_case__ : Any=2 , snake_case__ : List[Any]=32 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : int=30 , snake_case__ : int=0 , snake_case__ : Tuple=1 , snake_case__ : Optional[Any]=2 , snake_case__ : int=None , ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : Any = decoder_seq_length
# For common tests
snake_case : Any = self.decoder_seq_length
snake_case : Optional[int] = is_training
snake_case : List[str] = use_attention_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : Any = d_model
snake_case : Dict = d_model
snake_case : List[str] = decoder_layers
snake_case : Union[str, Any] = decoder_layers
snake_case : int = decoder_ffn_dim
snake_case : List[Any] = decoder_attention_heads
snake_case : Dict = decoder_attention_heads
snake_case : Optional[int] = eos_token_id
snake_case : Dict = bos_token_id
snake_case : List[str] = pad_token_id
snake_case : int = decoder_start_token_id
snake_case : List[Any] = use_cache
snake_case : List[str] = max_position_embeddings
snake_case : Dict = None
snake_case : Union[str, Any] = decoder_seq_length
snake_case : Union[str, Any] = 2
snake_case : Union[str, Any] = 1
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_attention_mask:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , ) -> str:
'''simple docstring'''
snake_case : Optional[int] = True
snake_case : List[Any] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval()
snake_case : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case : List[str] = model(snake_case__ , use_cache=snake_case__ )
snake_case : Any = model(snake_case__ )
snake_case : Any = model(snake_case__ , use_cache=snake_case__ )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) )
self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 )
snake_case : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
snake_case : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : str = model(snake_case__ )["last_hidden_state"]
snake_case : str = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"]
# select random slice
snake_case : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : str = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,A_ ,unittest.TestCase ):
A__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
A__ : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A__ : int = True
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ )
snake_case : int = ConfigTester(self , config_class=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
pass
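The `past_key_values` test above generalizes to any causal decoder; a hedged sketch of the cached-versus-recomputed equivalence check, written against the generic HF decoder interface rather than TrOCR specifically:

```python
import torch

def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3) -> bool:
    """Cached decoding of next_tokens must match full recomputation."""
    with torch.no_grad():
        full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
        past = model(input_ids, use_cache=True).past_key_values
        step = model(next_tokens, past_key_values=past).last_hidden_state
    # Compare the positions produced for next_tokens in both runs.
    return torch.allclose(full[:, input_ids.shape[-1]:], step, atol=atol)
```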
| 10 | 1 |
lowerCAmelCase__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
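# All of the try/except blocks above follow one idiom: probe for an optional
# backend, and fall back to auto-generated dummy objects that defer the failure
# to first use. A minimal sketch of the same idiom (names are illustrative of the
# pattern, not additional API of this package):
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_pt_objects import *  # noqa F403  (dummies raise on instantiation)
#     else:
#         from .models import UNet2DModel  # real implementation
#
# This keeps a plain `import diffusers` dependency-free while still producing a
# readable error the moment a class backed by a missing extra is actually used.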
| 11 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
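# Differential-testing harness: each scripted operation list below is replayed
# against both HashMap and a plain dict, and the (result, exception) pairs
# returned by _run_operation are compared, so the two implementations must agree
# even on failure behaviour.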
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_implemented():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
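# To exercise these cases (the file name below is an assumption, not fixed by
# this module):
#
#     pytest -v test_hash_map.py
#
# Each `pytest.param` id above shows up as a separate case in the test report.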
| 265 | 0 |
'''simple docstring'''
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
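# Illustrative invocation (all values below are hypothetical): running
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b --command "echo hello"
#
# makes tpu_command_launcher assemble roughly
#
#     gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-b \
#         --command "cd /usr/share; echo hello" --worker all
#
# while --debug prints the assembled command instead of executing it.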
| 356 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
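# A minimal inference sketch mirroring the integration test above (the checkpoint
# id comes from this file; the image path and everything else are illustrative):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, FocalNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#     model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     predicted_class = model(**inputs).logits.argmax(dim=-1).item()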
| 183 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__A = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__A = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
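# For reference, the quantity computed above is the standard corpus BLEU
# (Papineni et al., 2002):
#
#     BLEU = BP * exp(sum_n w_n * log p_n),   BP = 1 if c > r else exp(1 - r / c)
#
# where p_n are the modified n-gram precisions for n = 1..max_order, w_n = 1/max_order,
# c is the total translation length and r the effective reference length.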
| 293 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 293 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
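# A hedged usage sketch of the truncation hook above (the patterns and
# `generated_ids` below are illustrative stand-ins, not defaults of this class):
#
#     tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tok.decode(generated_ids, truncate_before_pattern=["^#", "^'''", "\n\n\n"])
#
# Decoding then cuts the returned string at the first match of any pattern, and
# also at a second top-level `print` or `def`, which trims runaway completions.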
| 371 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 279 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Elementwise logistic sigmoid: 1 / (1 + e^-x).

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    SiLU (swish) activation: x * sigmoid(x).

    >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
    array([-0.26894142,  0.73105858,  1.76159416])
    """
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def snake_case__ ( self : Optional[Any] , token_ids_a : Union[str, Any] , token_ids_b : int=None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def snake_case__ ( self : str , offset_mapping_a : List[str] , offset_mapping_b : Any=None ):
        """simple docstring"""
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def snake_case__ ( self : Dict , token_ids_a : List[Any] , token_ids_b : List[Any]=None , already_has_special_tokens : Dict=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def snake_case__ ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
    def snake_case__ ( self : Any , char : Union[str, Any] ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
    def snake_case__ ( self : List[str] , char : Any ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
    def snake_case__ ( self : int , char : Dict ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
    def snake_case__ ( self : Union[str, Any] , char : Union[str, Any] ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
return True
return False
    def snake_case__ ( self : Dict , filepath : Optional[Any] ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , "r" , encoding="utf-8" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("\n" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def snake_case__ ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , "sentencepiece.bpe.model" )
        with open(tokenizer_model_file , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
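# Worked illustration of the special-token layout implemented above (hedged sketch:
# the ids are invented, with 1 standing in for [CLS] and 2 for [SEP]; any real vocabulary differs):
# for a pair A = [10, 11] and B = [20], building inputs with special tokens gives
#   [CLS] A [SEP] [SEP] B [SEP]  ->  [1, 10, 11, 2, 2, 20, 2]
# and the matching token type ids are [0] * (len(A) + 1) + [1] * (len(B) + 3)
#   ->  [0, 0, 0, 1, 1, 1, 1]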
| 187 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def lowercase_ ( voltage : float , current : float , power : float ):
    """simple docstring"""
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
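    # Hedged usage sketch for the function above (kept under its obfuscated name `lowercase_`):
    # exactly one argument is 0 and gets solved from P = V * I, here V = P / I = 5 / 2.
    print(lowercase_(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)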
| 16 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def __A ( self ) -> Any:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
__UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_torch
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__UpperCAmelCase )
@slow
@require_tf
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : int = None
self.run_pipeline_test(__UpperCAmelCase , [] )
@require_tf
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : str = None
self.run_pipeline_test(__UpperCAmelCase , [] )
    def get_test_pipeline ( self , model , tokenizer , processor ) -> Any:
        '''simple docstring'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = fill_masker.tokenizer
__UpperCAmelCase : Union[str, Any] = fill_masker.model
__UpperCAmelCase : Tuple = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
__UpperCAmelCase , [
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
] , )
with self.assertRaises(__UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__UpperCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Dict = tokenizer.get_vocab()
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase )
__UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : Any = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase )
__UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) )
# Call argument
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : List[Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase )
__UpperCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) )
# Score equivalence
__UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
__UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs]
__UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ) == set(__UpperCAmelCase ):
__UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase )
__UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] )
with self.assertRaises(__UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 )
__UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : int = tokenizer.get_vocab()
__UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
# top_k=2, ntargets=3
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
__UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
__UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
__UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__UpperCAmelCase ) , 3 )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Dict = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
] , )
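# Minimal standalone sketch of the pipeline exercised above (hedged: requires downloading
# the tiny test checkpoint; the scores would match the values asserted in the tests):
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
# print(unmasker("My name is <mask>"))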
| 16 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card (model_card_dir : Optional[int] , src_lang : str , tgt_lang : Optional[Any] , model_name : str ):
"""simple docstring"""
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
_snake_case : Union[str, Any] = F"{src_lang}-{tgt_lang}"
_snake_case : Dict = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(F"Generating {path}" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(_snake_case )  # the model card text assembled above
# make sure we are under the root of the project
A_ = Path(__file__).resolve().parent.parent.parent
A_ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
A_ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
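# For example, the first loop iteration above writes the generated card to
# model_cards/allenai/wmt16-en-de-dist-12-1/README.md (the path built from `model_cards_dir`).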
| 64 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : str =DDIMPipeline
UpperCamelCase__ : List[Any] =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ : Tuple =PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
UpperCamelCase__ : Tuple =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ : Any =False
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
__UpperCamelCase : int =DDIMScheduler()
__UpperCamelCase : Optional[int] ={'unet': unet, 'scheduler': scheduler}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : str =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Optional[int] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any ='cpu'
__UpperCamelCase : Optional[Any] =self.get_dummy_components()
__UpperCamelCase : Tuple =self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : int =pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Dict =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__UpperCamelCase : Tuple =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
__UpperCamelCase : Tuple =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ , 1E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ='google/ddpm-cifar10-32'
__UpperCamelCase : str =UNetaDModel.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =DDIMScheduler()
__UpperCamelCase : List[Any] =DDIMPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
ddim.to(lowerCamelCase__ )
ddim.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : List[str] =ddim(generator=lowerCamelCase__ , eta=0.0 , output_type='numpy' ).images
__UpperCamelCase : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase : str =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] ='google/ddpm-ema-bedroom-256'
__UpperCamelCase : Any =UNetaDModel.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : int =DDIMScheduler.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : Dict =DDIMPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
ddpm.to(lowerCamelCase__ )
ddpm.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple =torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] =ddpm(generator=lowerCamelCase__ , output_type='numpy' ).images
__UpperCamelCase : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase : Optional[Any] =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 71 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model: Optional[int] ) -> Any:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir: str , metric: str ) -> ModelCheckpoint:
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric: str , patience: int ) -> EarlyStopping:
    '''simple docstring'''
    return EarlyStopping(
        monitor=F'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback ( pl.Callback ):
    def on_batch_end ( self , trainer , pl_module ) -> Any:
        """simple docstring"""
        lrs = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs ( self , trainer , pl_module , type_path , save_generations=True ) -> List[Any]:
        """simple docstring"""
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'''{key}: {val:.6f}\n'''
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start ( self , trainer , pl_module ) -> Any:
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end ( self , trainer , pl_module ) -> Tuple:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
    @rank_zero_only
    def on_validation_end ( self , trainer , pl_module ) -> Optional[int]:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 353 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser ( subparsers: Optional[Any]=None ) -> List[Any]:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher ( args: str ) -> Any:
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F'''Running {" ".join(cmd )}''' )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )
def main ( ) -> Optional[Any]:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
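# Hedged CLI sketch (flag names taken from the argparse definitions above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug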
| 185 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """simple docstring"""
    def put ( self : List[Any] , value : List[Any] ) -> Tuple:
        raise NotImplementedError()
    def end ( self : Dict ) -> str:
        raise NotImplementedError()
class TextStreamer( BaseStreamer ):
    """simple docstring"""
    def __init__( self : Any , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs : Optional[Any] ) -> int:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put ( self : Optional[int] , value : str ) -> Optional[Any]:
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end ( self : Union[str, Any] ) -> Tuple:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text ( self : int , text : str , stream_end : bool = False ) -> int:
        print(text , flush=True , end='''''' if not stream_end else None )
    def _is_chinese_char ( self : List[str] , cp : int ) -> Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
class TextIteratorStreamer( TextStreamer ):
    """simple docstring"""
    def __init__( self : Optional[int] , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs : List[str] ) -> Any:
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text ( self : Dict , text : str , stream_end : bool = False ) -> Tuple:
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Tuple ) -> Union[str, Any]:
return self
    def __next__( self : List[Any] ) -> List[str]:
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
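# Hedged usage sketch for the streamers above (standard transformers generation API;
# 'gpt2' is only an example checkpoint and triggers a download):
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tok = AutoTokenizer.from_pretrained('gpt2')
# model = AutoModelForCausalLM.from_pretrained('gpt2')
# streamer = TextStreamer(tok, skip_prompt=True)
# model.generate(**tok('Hello,', return_tensors='pt'), streamer=streamer, max_new_tokens=20)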
| 30 |
from __future__ import annotations
from collections.abc import MutableSequence
class lowercase__:
"""simple docstring"""
    def __init__( self : Optional[Any] , degree : int , coefficients : MutableSequence[float] ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''' )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self : Any , polynomial_a : Polynomial ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self : str , polynomial_a : Polynomial ) -> Polynomial:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self : Any , polynomial_a : Polynomial ) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate ( self : Dict , substitution : int | float ) -> int | float:
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
def __str__( self : Tuple ) -> str:
        polynomial = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ )
return polynomial
def __repr__( self : Optional[Any] ) -> str:
return self.__str__()
    def derivative ( self : int ) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral ( self : List[str] , constant : int | float = 0 ) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self : str , polynomial_a : object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
    def __ne__( self : List[str] , polynomial_a : object ) -> bool:
        return not self.__eq__(polynomial_a )
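# Worked example for the class above (coefficients[i] is the coefficient of x**i):
# p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
# p.evaluate(2)                  # 3 * 4 + 1 = 13
# p.derivative()                 # Polynomial(1, [0, 6])       -> 6x
# p.integral()                   # Polynomial(3, [0, 1, 0, 1]) -> x^3 + x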
| 30 | 1 |
from math import factorial
def binomial_distribution ( successes: int , trials: int , prob: float ) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials" )
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("the function is defined for non-negative integers" )
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
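    # Worked check of the call above: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375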
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
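# Illustrative lookups (doctest style), assuming the mapping above is bound to
# the module-level name LANGUAGE_CODES that the tool below reads:
#
#     >>> LANGUAGE_CODES["French"]
#     'fra_Latn'
#     >>> {code: name for name, code in LANGUAGE_CODES.items()}["deu_Latn"]
#     'German'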
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
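# Minimal usage sketch (hypothetical): the agents runtime is assumed to fetch
# the NLLB checkpoint and tokenizer when setup() runs, and calling the tool
# chains encode -> forward -> decode:
#
#     tool = TranslationTool()
#     tool.setup()
#     tool("Bonjour le monde", src_lang="French", tgt_lang="English")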
| 189 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE ={"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["BeitFeatureExtractor"]
__SCREAMING_SNAKE_CASE =["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
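# The pattern above keeps `import transformers.models.beit` cheap: only the
# names registered in _import_structure exist up front, and swapping this
# module for a _LazyModule in sys.modules defers the heavy torch/flax imports
# until an attribute is first touched, e.g. (illustrative):
#
#     from transformers.models.beit import BeitConfig  # config only, no torch
#     from transformers.models.beit import BeitModel   # now torch gets imported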
| 213 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class UpperCamelCase ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,__UpperCamelCase = 32 ,__UpperCamelCase = 64 ,__UpperCamelCase = 20 ,__UpperCamelCase = 768 ,__UpperCamelCase=77 ,__UpperCamelCase=4 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = "silu" ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "linear" ,__UpperCamelCase = "prd" ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase_ : Dict = num_attention_heads
lowercase_ : Dict = attention_head_dim
lowercase_ : Any = num_attention_heads * attention_head_dim
lowercase_ : Optional[Any] = additional_embeddings
lowercase_ : int = time_embed_dim or inner_dim
lowercase_ : Optional[Any] = embedding_proj_dim or embedding_dim
lowercase_ : List[str] = clip_embed_dim or embedding_dim
lowercase_ : Union[str, Any] = Timesteps(__UpperCamelCase ,__UpperCamelCase ,0 )
lowercase_ : Tuple = TimestepEmbedding(__UpperCamelCase ,__UpperCamelCase ,out_dim=__UpperCamelCase ,act_fn=__UpperCamelCase )
lowercase_ : Optional[Any] = nn.Linear(__UpperCamelCase ,__UpperCamelCase )
if embedding_proj_norm_type is None:
lowercase_ : Dict = None
elif embedding_proj_norm_type == "layer":
lowercase_ : str = nn.LayerNorm(__UpperCamelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowercase_ : Tuple = nn.Linear(__UpperCamelCase ,__UpperCamelCase )
if encoder_hid_proj_type is None:
lowercase_ : str = None
elif encoder_hid_proj_type == "linear":
lowercase_ : Optional[int] = nn.Linear(__UpperCamelCase ,__UpperCamelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowercase_ : List[Any] = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,__UpperCamelCase ) )
if added_emb_type == "prd":
lowercase_ : Union[str, Any] = nn.Parameter(torch.zeros(1 ,1 ,__UpperCamelCase ) )
elif added_emb_type is None:
lowercase_ : List[Any] = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowercase_ : Dict = nn.ModuleList(
[
BasicTransformerBlock(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,dropout=__UpperCamelCase ,activation_fn='gelu' ,attention_bias=__UpperCamelCase ,)
for d in range(__UpperCamelCase )
] )
if norm_in_type == "layer":
lowercase_ : Optional[int] = nn.LayerNorm(__UpperCamelCase )
elif norm_in_type is None:
lowercase_ : Dict = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowercase_ : List[Any] = nn.LayerNorm(__UpperCamelCase )
lowercase_ : Optional[int] = nn.Linear(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0000.0 )
causal_attention_mask.triu_(1 )
lowercase_ : int = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,__UpperCamelCase ,persistent=__UpperCamelCase )
lowercase_ : List[Any] = nn.Parameter(torch.zeros(1 ,__UpperCamelCase ) )
lowercase_ : Optional[int] = nn.Parameter(torch.zeros(1 ,__UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
lowercase_ : Tuple = {}
def fn_recursive_add_processors(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
if hasattr(__UpperCamelCase ,'set_processor' ):
lowercase_ : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,__UpperCamelCase ,__UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
return processors
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = len(self.attn_processors.keys() )
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__UpperCamelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
if hasattr(__UpperCamelCase ,'set_processor' ):
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
module.set_processor(__UpperCamelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,__UpperCamelCase ,__UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = True ,) -> Any:
'''simple docstring'''
lowercase_ : Any = hidden_states.shape[0]
lowercase_ : str = timestep
if not torch.is_tensor(__UpperCamelCase ):
lowercase_ : Any = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(__UpperCamelCase ) and len(timesteps.shape ) == 0:
lowercase_ : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase_ : Optional[int] = timesteps * torch.ones(__UpperCamelCase ,dtype=timesteps.dtype ,device=timesteps.device )
lowercase_ : Any = self.time_proj(__UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase_ : List[Any] = timesteps_projected.to(dtype=self.dtype )
lowercase_ : Any = self.time_embedding(__UpperCamelCase )
if self.embedding_proj_norm is not None:
lowercase_ : List[str] = self.embedding_proj_norm(__UpperCamelCase )
lowercase_ : Any = self.embedding_proj(__UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase_ : Optional[int] = self.encoder_hidden_states_proj(__UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase_ : List[Any] = self.proj_in(__UpperCamelCase )
lowercase_ : List[str] = self.positional_embedding.to(hidden_states.dtype )
lowercase_ : Dict = []
lowercase_ : Tuple = 0
if encoder_hidden_states is not None:
additional_embeds.append(__UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase_ : List[Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase_ : List[Any] = hidden_states[:, None, :]
lowercase_ : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase_ : int = self.prd_embedding.to(hidden_states.dtype ).expand(__UpperCamelCase ,-1 ,-1 )
additional_embeds.append(__UpperCamelCase )
lowercase_ : int = torch.cat(
__UpperCamelCase ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase_ : Optional[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase_ : Any = F.pad(
__UpperCamelCase ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
lowercase_ : Dict = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase_ : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowercase_ : str = F.pad(__UpperCamelCase ,(0, self.additional_embeddings) ,value=0.0 )
lowercase_ : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase_ : Tuple = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
lowercase_ : Optional[Any] = self.norm_in(__UpperCamelCase )
for block in self.transformer_blocks:
lowercase_ : int = block(__UpperCamelCase ,attention_mask=__UpperCamelCase )
lowercase_ : str = self.norm_out(__UpperCamelCase )
if self.prd_embedding is not None:
lowercase_ : Tuple = hidden_states[:, -1]
else:
lowercase_ : List[Any] = hidden_states[:, additional_embeddings_len:]
lowercase_ : str = self.proj_to_clip_embeddings(__UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 213 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
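# Why the recurrence works: the subsequence need not be contiguous, so the best
# answer is the sum of all positive elements (or the single largest element if
# every value is negative). Doctest-style examples:
#
#     >>> max_subsequence_sum([1, 2, 3, 4, -2])
#     10
#     >>> max_subsequence_sum([-2, -3, -1, -4, -6])
#     -1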
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 111 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''efficientnet'''
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = 'swish',
        hidden_dim: int = 2560,
        pooling_type: str = 'mean',
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
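# Hedged usage sketch of the config restored above (values are illustrative):
#
#     config = EfficientNetConfig(image_size=224, dropout_rate=0.3)
#     config.num_hidden_layers  # derived as sum(num_block_repeats) * 4 == 64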
| 111 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
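# The signature trick: words that sort to the same letter string are anagrams
# of one another, e.g. signature("listen") == signature("silent") == "eilnst".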
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 286 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 286 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        """simple docstring"""
        self.tool = load_tool('text-to-speech')
        self.tool.setup()
    def test_exact_match_arg(self):
        """simple docstring"""
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
    def test_exact_match_kwarg(self):
        """simple docstring"""
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 290 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __lowerCAmelCase :
def __init__( self : str , A : str , A : Dict=13 , A : int=7 , A : Tuple=True , A : Union[str, Any]=True , A : Any=True , A : Dict=True , A : Dict=99 , A : Tuple=32 , A : Any=2 , A : Any=4 , A : Any=37 , A : Optional[Any]="gelu" , A : List[Any]=0.1 , A : Tuple=0.1 , A : Optional[Any]=5_12 , A : Tuple=16 , A : int=2 , A : List[str]=0.0_2 , A : int=False , A : List[Any]=True , A : Optional[Any]="None" , A : Union[str, Any]=3 , A : List[str]=4 , A : List[Any]=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple , A : int , A : Any , A : List[str] , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel(config=A)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str , A : Tuple , A : Tuple , A : Optional[int] , A : List[str] , A : Any , A : List[str] , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForMaskedLM(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : List[Any] , A : Tuple , A : Tuple , A : Optional[int] , A : Optional[int] , A : List[Any] , A : Any , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForSequenceClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : List[Any] , A : List[str] , A : Optional[Any] , A : int , A : Any , A : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForTokenClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Dict , A : Dict , A : Any , A : Tuple , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForQuestionAnswering(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A)
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
self.assertIsNotNone(A)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet')
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
_UpperCAmelCase = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , A , atol=1E-4)
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)
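# Example: mean([3, 6, 9, 12, 15, 18, 21]) == 84 / 7 == 12.0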
if __name__ == "__main__":
import doctest
doctest.testmod()
| 249 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
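# Hedged CLI sketch (paths and script name are placeholders):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path /path/to/pytorch_model.bin \
#         --tf_cache_dir /path/to/tf_checkpoints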
| 359 |
import copy
import re
class TrialShortNamer:
    '''simple docstring'''
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f'You should provide a default value for the param name {k} with value {v}')
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f'{key}{sep}{v}'
            name.append(e)
        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
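# Hedged usage sketch: subclass with a PREFIX and DEFAULTS, then round-trip a
# hyperparameter dict (the subclass and values here are made up):
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#     RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 32})
#     # -> 'run_lr0.0003'  (only the non-default parameter is encoded)
#     RunNamer.parse_repr("run_lr0.0003")
#     # -> {'learning_rate': 0.0003, 'batch_size': 32}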
| 124 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_lowercase : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_lowercase : str = {
"allenai/led-base-16384": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[str] =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCamelCase__ : Optional[Any] =bs[:]
lowerCamelCase__ : Optional[int] =0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase__ : List[str] =[chr(__lowerCamelCase ) for n in cs]
return dict(zip(__lowerCamelCase , __lowerCamelCase ) )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =set()
lowerCamelCase__ : List[str] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ : int =char
return pairs
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Union[str, Any]="replace", lowerCamelCase : Any="<s>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : int="</s>", lowerCamelCase : Dict="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : Optional[Any]="<pad>", lowerCamelCase : Optional[Any]="<mask>", lowerCamelCase : Tuple=False, **lowerCamelCase : Tuple, )-> Any:
lowerCamelCase__ : int =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else bos_token
lowerCamelCase__ : int =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else eos_token
lowerCamelCase__ : Tuple =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else sep_token
lowerCamelCase__ : List[str] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else cls_token
lowerCamelCase__ : List[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else unk_token
lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, add_prefix_space=lowerCamelCase, **lowerCamelCase, )
with open(lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ : List[str] =json.load(lowerCamelCase )
lowerCamelCase__ : Any ={v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Dict =errors # how to handle errors in decoding
lowerCamelCase__ : Dict =bytes_to_unicode()
lowerCamelCase__ : Optional[Any] ={v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ : Optional[Any] =merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ : str =[tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[int] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowerCamelCase__ : List[Any] ={}
lowerCamelCase__ : List[str] =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : Tuple =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self : Tuple )-> int:
return len(self.encoder )
def snake_case ( self : Optional[Any] )-> str:
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case ( self : List[str], lowerCamelCase : Dict )-> str:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Tuple =tuple(lowerCamelCase )
lowerCamelCase__ : int =get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : Tuple =min(lowerCamelCase, key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Any =bigram
lowerCamelCase__ : Tuple =[]
lowerCamelCase__ : Optional[Any] =0
while i < len(lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] =word.index(lowerCamelCase, lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] =j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Optional[int] =tuple(lowerCamelCase )
lowerCamelCase__ : int =new_word
if len(lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : Optional[Any] =get_pairs(lowerCamelCase )
lowerCamelCase__ : str =''' '''.join(lowerCamelCase )
lowerCamelCase__ : Optional[int] =word
return word
def snake_case ( self : Optional[int], lowerCamelCase : Tuple )-> Any:
lowerCamelCase__ : Optional[int] =[]
for token in re.findall(self.pat, lowerCamelCase ):
lowerCamelCase__ : List[str] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def snake_case ( self : Optional[Any], lowerCamelCase : List[str] )-> List[Any]:
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token ) )
def snake_case ( self : Tuple, lowerCamelCase : int )-> List[Any]:
return self.decoder.get(lowerCamelCase )
def snake_case ( self : Dict, lowerCamelCase : List[str] )-> int:
lowerCamelCase__ : List[str] =''''''.join(lowerCamelCase )
lowerCamelCase__ : Optional[int] =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
return text
def snake_case ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : int =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : int =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCamelCase, ensure_ascii=lowerCamelCase ) + '''\n''' )
lowerCamelCase__ : int =0
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ : Dict =token_index
writer.write(''' '''.join(lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case ( self : Any, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[Any] =[self.cls_token_id]
lowerCamelCase__ : str =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def snake_case ( self : List[str], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
lowerCamelCase__ : Union[str, Any] =[self.sep_token_id]
lowerCamelCase__ : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : List[str]=False, **lowerCamelCase : Dict )-> Dict:
lowerCamelCase__ : Any =kwargs.pop('''add_prefix_space''', self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : List[str] =''' ''' + text
return (text, kwargs)
def snake_case ( self : Dict, lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCamelCase : Optional[int] = None, lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, )-> dict:
lowerCamelCase__ : Optional[Any] =super()._pad(
encoded_inputs=lowerCamelCase, max_length=lowerCamelCase, padding_strategy=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : Optional[Any] ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Any =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : int =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Optional[int] =len(lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Dict =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
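# Note on _pad above: in `global_attention_mask` a 0 already means "local
# attention", so padded positions use -1 rather than 0. Illustrative setup
# (hypothetical tensors, assuming a tokenizer instance `tok` and torch):
#
#     enc = tok("a long document", return_tensors="pt")
#     enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
#     enc["global_attention_mask"][:, 0] = 1  # global attention on the first token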
| 238 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
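# Worked example: solving sigma = n * e * mu for the missing quantity; with
# electron_conc=1600 and mobility=200 (toy values) the conductivity branch
# returns 1600 * 200 * 1.6021e-19:
#
#     >>> electrical_conductivity(conductivity=0, electron_conc=1600, mobility=200)
#     ('conductivity', 5.12672e-14)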
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 359 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 | 0 |
'''simple docstring'''
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
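# Illustrative schema built from these feature types (a hedged sketch):
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})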
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the annotations below still resolve without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ],
            outputs,
        )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt(self):
'''simple docstring'''
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
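
# Equivalent standalone usage of what the slow test above exercises (downloads
# a large model):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")        # PIL.Image
#   print(out["predicted_depth"].shape)   # torch.Tensor depth map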
| 323 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(_a )
A_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = model_class(_a )
A_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : str = [*signature.parameters.keys()]
A_ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
def check_hidden_states_output(snake_case :str , snake_case :Tuple , snake_case :List[Any] ):
A_ : int = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
A_ : List[Any] = model(**self._prepare_for_class(_a , _a ) )
A_ : str = outputs.hidden_states
A_ : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Tuple = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Tuple = True
check_hidden_states_output(_a , _a , _a )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
def _config_zero_init(snake_case :str ):
A_ : Dict = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-10 )
if isinstance(getattr(_a , _a , _a ) , _a ):
A_ : List[str] = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[int] = _config_zero_init(_a )
for model_class in self.all_model_classes:
A_ : List[str] = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
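
# Equivalent standalone inference sketch for the checkpoint used above:
#
#   from transformers import ViTImageProcessor, SwiftFormerForImageClassification
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   pred = model(**inputs).logits.argmax(-1)  # logits shape (1, 1000)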
| 370 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
"""simple docstring"""
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
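
    # Expected output for the sample data above:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10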
| 70 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
# For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
            outputs = model(**batch)
            loss = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
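
# Typical invocations (script filename illustrative), after running `accelerate config`:
#
#   accelerate launch tracking.py --with_tracking
#   accelerate launch tracking.py --mixed_precision fp16 --with_tracking --project_dir runs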
| 100 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
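
# Interactive sketch of the behaviour checked above (downloads the tokenizer):
#
#   from transformers import XLNetTokenizer
#   tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tok.tokenize("I was born in 92000, and this is falsé.")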
| 100 | 1 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """
    Returns the next number of the chain by adding the squares of each digit.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the number 1: its chain ends at 1
CHAINS[57] = False  # the number 58: its chain ends at 89
def chain(number: int) -> bool:
    """
    Returns True if the chain of `number` ends at 1, False if it ends at 89.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10000000) -> int:
    """
    Counts how many starting numbers below `number` produce a chain that
    arrives at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
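
    # Example chains from the problem statement:
    #   44 -> 32 -> 13 -> 10 -> 1 -> 1
    #   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89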
| 61 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
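
    # Tiny in-memory sanity check: for [[1, 2, 3, 4], [5, 6, 7, 8],
    # [9, 10, 11, 12], [13, 14, 15, 16]] the bottom row wins with
    # 13 * 14 * 15 * 16 = 43680.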
| 61 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase :Tuple = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
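
# Translation usage sketch for an Opus-MT checkpoint (downloads a model):
#
#   from transformers import MarianMTModel, MarianTokenizer
#   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tok(["I am a small frog"], return_tensors="pt")
#   print(tok.batch_decode(model.generate(**batch), skip_special_tokens=True))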
| 236 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
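
# Example invocation (script filename and output path illustrative):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn-hf --hf_config facebook/bart-large-cnn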
| 288 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
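
# Usage sketch (Transformers agents tool API; downloads the MNLI model):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> 'positive'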
| 244 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
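
# Round-trip sketch (in-memory SQLite; `to_sql`/`from_sql` availability depends
# on the installed `datasets` version):
#
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect(":memory:")
#   Dataset.from_dict({"x": [1, 2, 3]}).to_sql("t", con)
#   ds = Dataset.from_sql("SELECT x FROM t", con)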
| 244 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
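
# Usage sketch:
#
#   config = TransfoXLConfig(n_layer=4, d_model=256)
#   config.max_position_embeddings   # -> -1 (no fixed limit); assigning raises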
| 15 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen weight name onto the corresponding HF module name."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
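# Sanity check for the renaming rules above (hypothetical key, for illustration only):
#
#   rename_keys("transformer.layers.0.cross_attention.in_proj_weight")
#   # -> "model.decoder.layers.0.encoder_attn.in_proj_weight"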
def rename_state_dict(state_dict, hidden_size):
    """Rename the fairseq state dict and split it into the decoder state dict and the enc-dec projection."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    """Build the decoder config for a given MusicGen checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ):
"""simple docstring"""
A_ : Any = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
A_ : str = decoder_config_from_checkpoint(_UpperCAmelCase )
A_ : Optional[int] = fairseq_model.lm.state_dict()
A_ , A_ : str = rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
A_ : List[str] = TaEncoderModel.from_pretrained('''t5-base''' )
A_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
A_ : Union[str, Any] = MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A_ , A_ : Tuple = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A_ : Tuple = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
A_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A_ : Tuple = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
A_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
A_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
A_ : Optional[int] = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
A_ : Tuple = 2048
A_ : Union[str, Any] = 2048
# set other default generation config params
A_ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
A_ : List[str] = True
A_ : List[str] = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
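# Example invocation (the script filename is an assumption; the flags are defined above):
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small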
| 167 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
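# Example invocation (the script filename and paths are assumptions):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5.ckpt --dump_path ./sd15-diffusers --scheduler_type ddim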
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """Configuration class for the CodeGen model."""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export config for CodeGen, with optional past key values."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
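# Usage sketch for the classes above (default task, no past key values):
#
#   config = CodeGenConfig()
#   onnx_config = CodeGenOnnxConfig(config)
#   list(onnx_config.inputs.keys())  # -> ["input_ids", "attention_mask"]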
| 2 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case_ : int = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    """Return True if the two Flax models have (numerically) identical parameters."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 51 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert a PIL image (or list of them) into a normalized torch tensor in [-1, 1]."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherically interpolate between two arrays/tensors v0 and v1."""
    inputs_are_torch = False  # ensure the flag exists for plain numpy inputs
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
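# Worked example for slerp: t=0 returns v0, t=1 returns v1, and for orthogonal
# unit vectors the midpoint lands on the 45-degree point of the arc:
#
#   a = np.array([1.0, 0.0])
#   b = np.array([0.0, 1.0])
#   slerp(0.5, a, b)  # ~ array([0.7071, 0.7071])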
def spherical_dist_loss(x, y):
    """Squared spherical distance between two (batches of) embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
# class name follows the diffusers community "CLIP guided images mixing" pipeline this
# file mirrors; the original name was lost in the source
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCAmelCase ( self , __a = "auto") -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
# get the original timestep using init_timestep
_UpperCamelCase = min(int(num_inference_steps * strength) , __a)
_UpperCamelCase = max(num_inference_steps - init_timestep , 0)
_UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self , __a , __a , __a = None , __a = None , __a = 5_12 , __a = 5_12 , __a = 0.6 , __a = 50 , __a = 7.5 , __a = 1 , __a = 0.0 , __a = 1_00 , __a = None , __a = "pil" , __a = True , __a = 0.8 , __a = 0.1 , __a = 0.1 , ) -> Dict:
'''simple docstring'''
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__a)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(__a , torch.Generator) and batch_size > 1:
_UpperCamelCase = [generator] + [None] * (batch_size - 1)
_UpperCamelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_UpperCamelCase = [x[0] for x in coca_is_none if x[1]]
_UpperCamelCase = ''', '''.join(__a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
if style_prompt is None:
if len(__a):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
# get prompt text embeddings for content and style
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = slerp(__a , __a , __a)
# duplicate text embeddings for each generation per prompt
_UpperCamelCase = text_embeddings.repeat_interleave(__a , dim=0)
# set timesteps
_UpperCamelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_UpperCamelCase = {}
if accepts_offset:
_UpperCamelCase = 1
self.scheduler.set_timesteps(__a , **__a)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
_UpperCamelCase , _UpperCamelCase = self.get_timesteps(__a , __a , self.device)
_UpperCamelCase = timesteps[:1].repeat(__a)
# Preprocess image
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = slerp(__a , __a , __a)
if clip_guidance_scale > 0:
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = slerp(
__a , __a , __a)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase = content_text_input.input_ids.shape[-1]
_UpperCamelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=__a , return_tensors='''pt''')
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCamelCase = uncond_embeddings.repeat_interleave(__a , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCamelCase = torch.randn(__a , generator=__a , device='''cpu''' , dtype=__a).to(
self.device)
else:
_UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_UpperCamelCase = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
# check if the scheduler accepts generator
_UpperCamelCase = '''generator''' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_UpperCamelCase = generator
with self.progress_bar(total=__a):
for i, t in enumerate(__a):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
# predict the noise residual
_UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2)
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCamelCase = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCamelCase , _UpperCamelCase = self.cond_fn(
__a , __a , __a , __a , __a , __a , __a , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_UpperCamelCase = 1 / 0.1_8215 * latents
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a)
| 194 | 0 |
'''simple docstring'''
def heaps(arr):
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
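# Example: Heap's algorithm yields all n! orderings, e.g.
#
#   heaps([1, 2, 3])
#   # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]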
| 236 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' )
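# Usage sketch:
#
#   import torch
#   act = get_activation("gelu")
#   act(torch.zeros(2))  # -> tensor([0., 0.])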
| 236 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to our structure."""
    # Instantiate the authors' model with the pre-trained weights.
    # (assumption: the boolean flag values below follow the reference script; they were
    # lost in this copy)
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__A = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__A = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(a_ )) )
__A = torch.tensor(a_ ).unsqueeze(0 )
__A = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(a_ )) )
__A = torch.tensor(a_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 15 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 15 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision", [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 37 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ : Dict = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            OSError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            OSError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            OSError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 37 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__A = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__A = get_tests_dir("fixtures/vocab.json")
__A = get_tests_dir("fixtures")
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _snake_case ( self ):
lowercase__: str = 0
def _snake_case ( self ):
lowercase__: Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Union[str, Any] = WavaVecaConfig()
lowercase__: List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
lowercase__: Dict = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
lowercase__: str = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Dict = WavaVecaFeatureExtractor()
lowercase__: List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase__: Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase )
# save in new folder
processor.save_pretrained(__lowerCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f:
lowercase__: Dict = json.load(__lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f:
f.write(json.dumps(__lowerCamelCase ) )
lowercase__: int = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Tuple = WavaVecaFeatureExtractor()
lowercase__: List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase__: Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase )
# save in new folder
processor.save_pretrained(__lowerCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f:
lowercase__: Optional[Any] = json.load(__lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f:
f.write(json.dumps(__lowerCamelCase ) )
lowercase__: str = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Tuple = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(__lowerCamelCase )
# copy relevant files
copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f:
f.write('''{}''' )
lowercase__: Optional[int] = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
lowercase__: Any = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
lowercase__: Optional[Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase )
lowercase__: str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase__: Any = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase__: Dict = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase__: str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
lowercase__: List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _snake_case ( self ):
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoProcessor.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoProcessor.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__: Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Optional[int] = os.path.join(__lowerCamelCase , '''vocab.txt''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase__: List[str] = CustomTokenizer(__lowerCamelCase )
lowercase__: Tuple = CustomProcessor(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowerCamelCase )
lowercase__: Tuple = AutoProcessor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = False
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = False
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "AutoFeatureExtractor"
_UpperCAmelCase :List[str] = "AutoTokenizer"
_UpperCAmelCase :Optional[Any] = False
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
AutoProcessor.register(__lowerCamelCase , __lowerCamelCase )
# If remote code is not set, the default is to use local classes.
lowercase__: Dict = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase__: Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase__: Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
lowercase__: Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _snake_case ( self ):
lowercase__: Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _snake_case ( cls ):
lowercase__: Any = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def _snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _snake_case ( self ):
lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCamelCase , '''test-processor''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
lowercase__: str = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ):
lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowerCamelCase , '''test-processor-org''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token , organization='''valid_org''' , )
lowercase__: List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase__: Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: str = os.path.join(__lowerCamelCase , '''vocab.txt''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase__: List[Any] = CustomTokenizer(__lowerCamelCase )
lowercase__: int = CustomProcessor(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase__: Optional[Any] = Repository(__lowerCamelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(__lowerCamelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowerCamelCase , '''tokenizer_config.json''' ) ) as f:
lowercase__: Any = json.load(__lowerCamelCase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowercase__: List[Any] = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 177 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
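# Quick sanity check (my addition, not part of the original script): with
# f(x) = x**2 on [0, 1] and 10 steps, method_1 returns y ≈ 0.335, close to
# the exact integral value 1/3; the small gap is the trapezoidal-rule error.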
| 11 | 0 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        # Score each prediction against its reference after canonicalization.
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 364 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 5_12}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 86 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
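# Illustrative usage sketch (my addition; the checkpoint name is an assumption,
# not taken from this file):
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")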
| 209 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_splitting():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    # An unsupported backend name must be rejected for any num_proc value.
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
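# map_nested applies add_one to every leaf while preserving the container
# shape: a list stays a list, a dict stays a dict, and nested values are
# mapped recursively, which is exactly what the asserts above verify.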
| 209 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Union[str, Any] = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=1_0_0_0_0, decoder_layers=6, decoder_ffn_dim=2_0_4_8, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=2_5_6, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1_0_2_4, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # This is a decoder-only model, so the generic layer count mirrors the
        # number of decoder layers.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 57 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCamelCase : Dict = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_UpperCamelCase : List[str] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
_UpperCamelCase : Optional[int] = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
_UpperCamelCase : Optional[int] = "▁"
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : List[str] = AlbertTokenizer
def __init__( self , a=None , a=None , a=True , a=True , a=False , a="[CLS]" , a="[SEP]" , a="<unk>" , a="[SEP]" , a="<pad>" , a="[CLS]" , a="[MASK]" , **a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowercase__ : Dict = (
AddedToken(a , lstrip=a , rstrip=a , normalized=a )
if isinstance(a , a )
else mask_token
)
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowercase__ : int = do_lower_case
lowercase__ : Any = remove_space
lowercase__ : Optional[int] = keep_accents
lowercase__ : int = vocab_file
lowercase__ : Tuple = False if not self.vocab_file else True
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : Dict = [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : Union[str, Any] = [self.sep_token_id]
lowercase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : List[str] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 77 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 225 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE (__snake_case , __snake_case ):
"""simple docstring"""
__a ='nat'
__a ={
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Dict , __a : Dict=4 , __a : Any=3 , __a : int=64 , __a : int=[3, 4, 6, 5] , __a : int=[2, 4, 8, 16] , __a : Dict=7 , __a : Union[str, Any]=3.0 , __a : int=True , __a : List[Any]=0.0 , __a : List[Any]=0.0 , __a : Optional[Any]=0.1 , __a : Optional[Any]="gelu" , __a : Dict=0.02 , __a : Any=1e-5 , __a : Optional[int]=0.0 , __a : Any=None , __a : int=None , **__a : Any , ):
super().__init__(**__a )
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(__a )
_a = num_heads
_a = kernel_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(__a ) - 1) )
_a = layer_scale_init_value
_a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 362 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    # Naive recursive solution to the rod-cutting problem (exponential time).
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    # Dynamic-programming solution, memoized top-down.
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    # Dynamic-programming solution, iterative bottom-up.
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")

    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
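# Illustrative check (my addition), using the classic CLRS price table: with
# prices = [1, 5, 8, 9], a rod of length 4 is best cut into two pieces of
# length 2, so bottom_up_cut_rod(4, [1, 5, 8, 9]) returns 5 + 5 = 10.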
| 346 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_0_0_0 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_5_5
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
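# Both locks in each test point at the same lock file, so while the first
# lock is held the second acquire() must raise Timeout (here with timeout=0,
# meaning it fails immediately instead of blocking).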
| 84 |
import os
def solution() -> int:
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
if __name__ == "__main__":
print(solution())
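# Worked example from the problem statement: "COLIN" has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list, so
# it contributes 938 * 53 = 49714 to the total.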
| 176 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Optional[int]:
_A : Optional[Any] = parent
_A : Any = batch_size
_A : Tuple = seq_length
_A : Tuple = is_training
_A : Tuple = use_attention_mask
_A : str = use_token_type_ids
_A : Union[str, Any] = use_labels
_A : List[str] = vocab_size
_A : List[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : str = num_attention_heads
_A : Union[str, Any] = intermediate_size
_A : str = hidden_act
_A : Union[str, Any] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : List[str] = type_vocab_size
_A : Optional[Any] = type_sequence_label_size
_A : str = initializer_range
_A : str = num_choices
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_attention_mask:
_A : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_A : int = None
if self.use_token_type_ids:
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> Dict:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A , _A : Tuple = config_and_inputs
_A : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Optional[Any]:
_A : Optional[Any] = FlaxAlbertModelTester(self )
@slow
def a__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_A : Any = model_class_name.from_pretrained("""albert-base-v2""" )
_A : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> List[Any]:
_A : List[str] = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
_A : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_A : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_A : Optional[int] = model(_a , attention_mask=_a )[0]
_A : str = (1, 11, 768)
self.assertEqual(output.shape , _a )
_A : str = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
| 343 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # NOTE: this branch is unreachable, since any multiple of 15 is
            # already a multiple of 3 and is handled by the branch above.
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    # XOR every ciphertext byte with the cycled key; reject the key as soon
    # as a decoded byte falls outside the printable character set.
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
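# Why the brute force works: XOR is self-inverse, so (p ^ k) ^ k == p for any
# plaintext byte p and key byte k. A tiny illustration (my addition):
# >>> p, k = ord("t"), ord("x")
# >>> chr((p ^ k) ^ k)
# 't'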
| 78 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__A =namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, """README.md""")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""], key), getattr(expected_dataset_infos["""default"""], key)
        if key == "num_bytes":
            # Byte counts can drift slightly between runs, so compare within 1%.
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 163 | 0 |
"""simple docstring"""
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Mark v with color c, then give every unvisited neighbour the
        # opposite color.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge connects two same-colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
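# Extra check (my addition): a graph with an odd cycle is never bipartite,
# so a triangle is rejected.
# >>> check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
# False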
| 360 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( A__ ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self , a = True , a = 1 / 255 , a = True , a = 8 , **a , ) -> None:
super().__init__(**a)
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
SCREAMING_SNAKE_CASE = pad_size
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None , **a) -> np.ndarray:
return rescale(a , scale=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_image_size(a)
SCREAMING_SNAKE_CASE = (old_height // size + 1) * size - old_height
SCREAMING_SNAKE_CASE = (old_width // size + 1) * size - old_width
return pad(a , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_pad if do_pad is not None else self.do_pad
SCREAMING_SNAKE_CASE = pad_size if pad_size is not None else self.pad_size
SCREAMING_SNAKE_CASE = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(a) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=a , scale=a) for image in images]
if do_pad:
SCREAMING_SNAKE_CASE = [self.pad(a , size=a) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(a , a) for image in images]
SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a)
| 327 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Any = """wavlm"""
def __init__( self : Any , lowercase_ : Dict=32 , lowercase_ : Tuple=768 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Any=3_072 , lowercase_ : Any="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : List[str]=0.02 , lowercase_ : Tuple=1E-5 , lowercase_ : Optional[int]="group" , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : str=False , lowercase_ : Dict=128 , lowercase_ : Union[str, Any]=16 , lowercase_ : Optional[Any]=320 , lowercase_ : int=800 , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : str=0.05 , lowercase_ : Dict=10 , lowercase_ : Tuple=2 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : Optional[int]=320 , lowercase_ : Tuple=2 , lowercase_ : List[str]=0.1 , lowercase_ : int=100 , lowercase_ : Any=256 , lowercase_ : Any=256 , lowercase_ : List[str]=0.1 , lowercase_ : Union[str, Any]="mean" , lowercase_ : Optional[int]=False , lowercase_ : Any=False , lowercase_ : Tuple=256 , lowercase_ : Tuple=(512, 512, 512, 512, 1_500) , lowercase_ : Tuple=(5, 3, 3, 1, 1) , lowercase_ : str=(1, 2, 3, 1, 1) , lowercase_ : Dict=512 , lowercase_ : Optional[int]=80 , lowercase_ : Tuple=0 , lowercase_ : Dict=1 , lowercase_ : Optional[Any]=2 , lowercase_ : List[str]=False , lowercase_ : int=3 , lowercase_ : int=2 , lowercase_ : Union[str, Any]=3 , lowercase_ : Optional[int]=None , **lowercase_ : str , ) -> int:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Optional[int] = feat_extract_norm
UpperCAmelCase : List[str] = feat_extract_activation
UpperCAmelCase : Optional[Any] = list(lowercase_ )
UpperCAmelCase : List[str] = list(lowercase_ )
UpperCAmelCase : str = list(lowercase_ )
UpperCAmelCase : List[str] = conv_bias
UpperCAmelCase : Dict = num_buckets
UpperCAmelCase : str = max_bucket_distance
UpperCAmelCase : str = num_conv_pos_embeddings
UpperCAmelCase : int = num_conv_pos_embedding_groups
UpperCAmelCase : str = len(self.conv_dim )
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : str = hidden_dropout
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : Dict = activation_dropout
UpperCAmelCase : Tuple = feat_proj_dropout
UpperCAmelCase : Optional[int] = final_dropout
UpperCAmelCase : List[Any] = layerdrop
UpperCAmelCase : Optional[Any] = layer_norm_eps
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Any = num_ctc_classes
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Union[str, Any] = do_stable_layer_norm
UpperCAmelCase : Tuple = use_weighted_layer_sum
UpperCAmelCase : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : List[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Dict = mask_time_length
UpperCAmelCase : int = mask_time_min_masks
UpperCAmelCase : Optional[int] = mask_feature_prob
UpperCAmelCase : str = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase : int = num_codevectors_per_group
UpperCAmelCase : Optional[int] = num_codevector_groups
UpperCAmelCase : Any = contrastive_logits_temperature
UpperCAmelCase : Optional[int] = num_negatives
UpperCAmelCase : List[Any] = codevector_dim
UpperCAmelCase : List[str] = proj_codevector_dim
UpperCAmelCase : str = diversity_loss_weight
# ctc loss
UpperCAmelCase : int = ctc_loss_reduction
UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# adapter
UpperCAmelCase : Any = add_adapter
UpperCAmelCase : int = adapter_kernel_size
UpperCAmelCase : List[str] = adapter_stride
UpperCAmelCase : Optional[Any] = num_adapter_layers
UpperCAmelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Any = list(lowercase_ )
UpperCAmelCase : Tuple = list(lowercase_ )
UpperCAmelCase : Union[str, Any] = list(lowercase_ )
UpperCAmelCase : int = xvector_output_dim
    @property
    def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
        # overall stride of the convolutional feature extractor: the product of
        # all conv strides, i.e. how many input samples map to one output frame
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 151 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowercase__ = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self : str ) -> Tuple:
return 12
@property
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
return 12
@property
def UpperCAmelCase_ ( self : List[Any] ) -> str:
return 32
@property
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
torch.manual_seed(0 )
UpperCAmelCase : Any = 12
UpperCAmelCase : Optional[int] = 12
UpperCAmelCase : Tuple = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCAmelCase : Optional[Any] = TransformeraDModel(**lowercase_ )
return model
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Any = 'cpu'
UpperCAmelCase : Optional[int] = self.dummy_vqvae
UpperCAmelCase : List[str] = self.dummy_text_encoder
UpperCAmelCase : str = self.dummy_tokenizer
UpperCAmelCase : Dict = self.dummy_transformer
UpperCAmelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
UpperCAmelCase : Optional[Any] = VQDiffusionPipeline(
vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Any = 'teddy bear playing in the pool'
UpperCAmelCase : List[str] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : List[str] = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
UpperCAmelCase : List[str] = output.images
UpperCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Union[str, Any] = pipe(
[prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase : Optional[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : str = 'cpu'
UpperCAmelCase : List[str] = self.dummy_vqvae
UpperCAmelCase : Tuple = self.dummy_text_encoder
UpperCAmelCase : Dict = self.dummy_tokenizer
UpperCAmelCase : int = self.dummy_transformer
UpperCAmelCase : Optional[int] = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
UpperCAmelCase : List[Any] = VQDiffusionPipeline(
vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = 'teddy bear playing in the pool'
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Tuple = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
UpperCAmelCase : List[Any] = output.images
UpperCAmelCase : Tuple = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : int = pipe(
[prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
UpperCAmelCase : Tuple = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
UpperCAmelCase : str = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCAmelCase : Any = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase : Any = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Dict = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 151 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase =logging.get_logger(__name__)
def make_batched(videos):
    # accept a batch of videos, a single video (a list of frames), or a single image
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["pixel_values"]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = True , snake_case = None , snake_case = None , **snake_case , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase_)
_UpperCAmelCase : Tuple =size if size is not None else {'shortest_edge': 2_5_6}
_UpperCAmelCase : str =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_)
_UpperCAmelCase : Union[str, Any] =crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
_UpperCAmelCase : Optional[Any] =get_size_dict(lowerCamelCase_ , param_name='crop_size')
_UpperCAmelCase : int =do_resize
_UpperCAmelCase : Optional[int] =size
_UpperCAmelCase : Dict =do_center_crop
_UpperCAmelCase : Tuple =crop_size
_UpperCAmelCase : Union[str, Any] =resample
_UpperCAmelCase : int =do_rescale
_UpperCAmelCase : int =rescale_factor
_UpperCAmelCase : Union[str, Any] =offset
_UpperCAmelCase : Optional[Any] =do_normalize
_UpperCAmelCase : Optional[Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : List[str] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[Any] =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_)
if "shortest_edge" in size:
_UpperCAmelCase : int =get_resize_output_image_size(lowerCamelCase_ , size['shortest_edge'] , default_to_square=lowerCamelCase_)
elif "height" in size and "width" in size:
_UpperCAmelCase : Tuple =(size['height'], size['width'])
else:
raise ValueError(f"Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}")
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have \'height\' and \'width\' as keys. Got {size.keys()}")
return center_crop(lowerCamelCase_ , size=(size['height'], size['width']) , data_format=lowerCamelCase_ , **lowerCamelCase_)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case = True , snake_case = None , **snake_case , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : str =image.astype(np.floataa)
if offset:
_UpperCAmelCase : Optional[Any] =image - (scale / 2)
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def lowerCAmelCase ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.')
# All transformations expect numpy arrays.
_UpperCAmelCase : Union[str, Any] =to_numpy_array(lowerCamelCase_)
if do_resize:
_UpperCAmelCase : str =self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_)
if do_center_crop:
_UpperCAmelCase : Optional[Any] =self.center_crop(lowerCamelCase_ , size=lowerCamelCase_)
if do_rescale:
_UpperCAmelCase : Optional[Any] =self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ , offset=lowerCamelCase_)
if do_normalize:
_UpperCAmelCase : List[Any] =self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_)
_UpperCAmelCase : Tuple =to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_)
return image
def lowerCAmelCase ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Tuple =resample if resample is not None else self.resample
_UpperCAmelCase : str =do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : List[str] =do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : str =offset if offset is not None else self.offset
_UpperCAmelCase : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Any =image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Dict =image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] =size if size is not None else self.size
_UpperCAmelCase : Optional[int] =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_)
_UpperCAmelCase : Tuple =crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : Optional[int] =get_size_dict(lowerCamelCase_ , param_name='crop_size')
if not valid_images(lowerCamelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
_UpperCAmelCase : Optional[Any] =make_batched(lowerCamelCase_)
_UpperCAmelCase : Tuple =[
[
self._preprocess_image(
image=lowerCamelCase_ , do_resize=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , do_center_crop=lowerCamelCase_ , crop_size=lowerCamelCase_ , do_rescale=lowerCamelCase_ , rescale_factor=lowerCamelCase_ , offset=lowerCamelCase_ , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , data_format=lowerCamelCase_ , )
for img in video
]
for video in videos
]
_UpperCAmelCase : List[str] ={'pixel_values': videos}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_)
| 361 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
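# Note (added): with this layout, importing the package only loads the light
# configuration module eagerly; the torch/TF model classes listed above are
# imported lazily, on first attribute access through `_LazyModule`.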
| 242 | 0 |
from __future__ import annotations
import numpy as np
def relu(__a) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, __a)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 10 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
    def to_dict(self):
        '''Serialize, converting nested `GenerationConfig` values to plain dicts.'''
        # converting any nested GenerationConfig keeps the result
        # JSON-serializable (e.g. for logging callbacks)
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
return d
| 310 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Optional[int]:
"""simple docstring"""
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False ) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ = ''''''
else:
A__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
A__ = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = dct.pop(_UpperCamelCase )
A__ = val
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
A__ = ViTMSNConfig()
A__ = 1_000
A__ = '''datasets/huggingface/label-files'''
A__ = '''imagenet-1k-id2label.json'''
A__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase ) , '''r''' ) )
A__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A__ = 384
A__ = 1_536
A__ = 6
elif "l16" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
A__ = 0.1
elif "b4" in checkpoint_url:
A__ = 4
elif "l7" in checkpoint_url:
A__ = 7
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
A__ = 0.1
A__ = ViTMSNModel(_UpperCamelCase )
A__ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )['''target_encoder''']
A__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCamelCase )
A__ = create_rename_keys(_UpperCamelCase , base_model=_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , base_model=_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
A__ = ViTImageProcessor(
size=config.image_size , image_mean=_UpperCamelCase , image_std=_UpperCamelCase )
A__ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
A__ = model(**_UpperCamelCase )
A__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A__ = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
A__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
A__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
A__ = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
A__ = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCamelCase , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
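    # Example invocation (added for illustration; the script filename and the
    # output directory are placeholders):
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-converted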
| 359 |
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed value between km/h, m/s, mph and knots."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
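    # Illustrative checks (added); the expected values follow directly from
    # the conversion charts above:
    assert convert_speed(100, "km/h", "mph") == 62.137
    assert convert_speed(1, "m/s", "km/h") == 3.6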
| 231 | 0 |
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# re-attach public names to the deprecated modules for backward compatibility
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 48 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
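    # Minimal usage sketch (added): slowsort sorts the list in place.
    example = [5, 3, 1, 4, 2]
    slowsort(example)
    assert example == [1, 2, 3, 4, 5]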
| 54 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored nodes."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
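    # Added sanity check: every node of G is reachable from "A", so the
    # explored set covers the whole graph.
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}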
| 353 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : List[str] =1
UpperCamelCase__ : List[str] =3
UpperCamelCase__ : Optional[Any] =(32, 32)
UpperCamelCase__ : Tuple =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ )
return image
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase__ : Dict =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def _lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(lowercase_ )
@property
def _lowerCAmelCase ( self : Optional[Any] ):
def extract(*lowercase_ : Dict , **lowercase_ : List[Any] ):
class __a :
"""simple docstring"""
def __init__( self : Optional[Any] ):
UpperCamelCase__ : Dict =torch.ones([0] )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[int] ):
self.pixel_values.to(lowercase_ )
return self
return Out()
return extract
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any =self.dummy_cond_unet
UpperCamelCase__ : Tuple =PNDMScheduler(skip_prk_steps=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.dummy_vae
UpperCamelCase__ : List[str] =self.dummy_text_encoder
UpperCamelCase__ : List[str] =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCamelCase__ : Tuple =77
UpperCamelCase__ : int =self.dummy_image.to(lowercase_ )
UpperCamelCase__ : Tuple =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Union[str, Any] =AltDiffusionImgaImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : List[Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_ )
UpperCamelCase__ : Union[str, Any] =alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Tuple ='''A painting of a squirrel eating a burger'''
UpperCamelCase__ : str =torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , )
UpperCamelCase__ : Any =output.images
UpperCamelCase__ : Tuple =torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , return_dict=lowercase_ , )[0]
UpperCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCamelCase__ : int =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : Optional[Any] =np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : List[Any] =self.dummy_cond_unet
UpperCamelCase__ : int =PNDMScheduler(skip_prk_steps=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.dummy_vae
UpperCamelCase__ : Dict =self.dummy_text_encoder
UpperCamelCase__ : Optional[int] =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCamelCase__ : List[Any] =77
UpperCamelCase__ : List[Any] =self.dummy_image.to(lowercase_ )
# put models in fp16
UpperCamelCase__ : Dict =unet.half()
UpperCamelCase__ : List[str] =vae.half()
UpperCamelCase__ : int =bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : List[str] =AltDiffusionImgaImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Union[str, Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_ )
UpperCamelCase__ : List[Any] =alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Dict ='''A painting of a squirrel eating a burger'''
UpperCamelCase__ : Optional[Any] =torch.manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : str =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ : int =init_image.resize((760, 504) )
UpperCamelCase__ : Optional[int] ='''BAAI/AltDiffusion'''
UpperCamelCase__ : Union[str, Any] =AltDiffusionImgaImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ : Dict ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : str =torch.manual_seed(0 )
UpperCamelCase__ : Any =pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : List[Any] =output.images[0]
UpperCamelCase__ : int =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCamelCase__ : Union[str, Any] =np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCamelCase__ : List[Any] =init_image.resize((768, 512) )
UpperCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
UpperCamelCase__ : List[str] ='''BAAI/AltDiffusion'''
UpperCamelCase__ : List[str] =AltDiffusionImgaImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ : List[Any] ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : List[Any] =torch.manual_seed(0 )
UpperCamelCase__ : int =pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : List[Any] =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 157 | 0 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page-replacement cache that evicts the least recently used key."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store and key set; the cache is sized to `n`."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a use of `x`, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 28 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = 3_0_0
return config
def A ( self : Tuple ):
"""simple docstring"""
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = MraModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = MraModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = self.num_choices
UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = ()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = MraModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='MRA does not output attentions' )
def A ( self : List[str] ):
"""simple docstring"""
return
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 28 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = MgpstrTokenizer
A : Any = False
A : List[Any] = {}
A : Optional[int] = False
def _lowerCAmelCase ( self ) -> str:
super().setUp()
# fmt: off
snake_case_ : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : Any = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : Any = "tester"
snake_case_ : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowerCAmelCase ( self ) -> Optional[Any]:
pass
def _lowerCAmelCase ( self ) -> int:
snake_case_ : str = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Optional[int] = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : Dict = tokenizer.encode([special_token] , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1 )
snake_case_ : str = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertTrue(special_token not in decoded )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowerCAmelCase ( self ) -> int:
pass
| 36 |
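The setUp above builds a character-level vocabulary, serializes it to JSON, and later reloads it through the tokenizer. A hedged, standalone sketch of that round trip (the file name and vocab here are illustrative, not the exact MGP-STR fixtures):

import json
import os
import tempfile

vocab = ["[GO]", "[s]"] + [str(d) for d in range(10)] + [chr(c) for c in range(ord("a"), ord("z") + 1)]
vocab_tokens = dict(zip(vocab, range(len(vocab))))

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    with open(vocab_file, encoding="utf-8") as fp:
        loaded = json.load(fp)

ids = [loaded[ch] for ch in "tester"]  # character-level encoding, as MGP-STR does
assert len(ids) == 6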
def solution(length : int = 50 ) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 36 | 1 |
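The solution above is a bottom-up count of the ways to place blocks of length at least 3 into a row, with at least one empty cell separating blocks (the all-empty row counts as one way). As a cross-check, the same quantity can be computed top-down: for each position and length of the first block, recurse on the cells after the separating gap. A sketch of that recursive check (my own formulation, not part of the original):

from functools import lru_cache

@lru_cache(maxsize=None)
def count_ways(n: int) -> int:
    total = 1  # the all-empty row
    for start in range(n):                    # first cell of the first block
        for size in range(3, n - start + 1):  # block length >= 3
            rest = n - start - size
            # either the block ends the row, or one separator cell is consumed
            total += 1 if rest == 0 else count_ways(rest - 1)
    return total

assert count_ways(7) == 17  # matches the known count for a length-7 row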
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
_UpperCamelCase: Dict = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
'''simple docstring'''
lowercase : Optional[int] = Path('data_bin' )
lowercase : List[str] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_UpperCAmelCase ).parent ) , checkpoint_file=Path(_UpperCAmelCase ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_UpperCAmelCase ) , bpe='sentencepiece' , sentencepiece_model=str(Path(_UpperCAmelCase ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(_UpperCAmelCase )
lowercase : str = xmod.model.encoder.sentence_encoder
lowercase : int = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowercase : str = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , _UpperCAmelCase )
lowercase : List[str] = XmodForSequenceClassification(_UpperCAmelCase ) if classification_head else XmodForMaskedLM(_UpperCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase : Optional[int] = xmod_sent_encoder.embed_tokens.weight
lowercase : Dict = xmod_sent_encoder.embed_positions.weight
lowercase : Dict = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowercase : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowercase : List[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase : Any = model.roberta.encoder.layer[i]
lowercase : str = xmod_sent_encoder.layers[i]
# self attention
lowercase : str = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowercase : str = xmod_layer.self_attn.q_proj.weight
lowercase : List[str] = xmod_layer.self_attn.q_proj.bias
lowercase : Union[str, Any] = xmod_layer.self_attn.k_proj.weight
lowercase : Any = xmod_layer.self_attn.k_proj.bias
lowercase : List[Any] = xmod_layer.self_attn.v_proj.weight
lowercase : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowercase : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowercase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
lowercase : Tuple = xmod_layer.self_attn.out_proj.bias
lowercase : List[str] = xmod_layer.self_attn_layer_norm.weight
lowercase : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowercase : List[Any] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
lowercase : str = xmod_layer.final_layer_norm.weight
lowercase : Union[str, Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowercase : Tuple = xmod_layer.adapter_layer_norm.weight
lowercase : Any = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowercase : Union[str, Any] = xmod_sent_encoder.layer_norm.weight
lowercase : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowercase : str = xmod.model.classification_heads['mnli'].dense.weight
lowercase : Dict = xmod.model.classification_heads['mnli'].dense.bias
lowercase : Optional[int] = xmod.model.classification_heads['mnli'].out_proj.weight
lowercase : Any = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowercase : Any = xmod.model.encoder.lm_head.dense.weight
lowercase : Union[str, Any] = xmod.model.encoder.lm_head.dense.bias
lowercase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
lowercase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowercase : str = xmod.model.encoder.lm_head.weight
lowercase : Dict = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCamelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_UpperCamelCase: int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 255 |
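The conversion script above follows the standard recipe for porting a fairseq checkpoint: copy every weight tensor into the Hugging Face module tree, then verify parity by running both models on the same input. The verification step, distilled into a standalone sketch (the tensors are toy stand-ins, not real model outputs):

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # Report the worst-case element difference before the pass/fail decision.
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)

a = torch.randn(1, 8, 16)
b = a + 1e-5 * torch.randn_like(a)  # simulated converted-model output
assert outputs_match(a, b)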
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 255 | 1 |
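Usage of the max_subarray_sum defined above (Kadane's algorithm: curr_sum tracks the best subarray ending at the current element, max_sum the best seen anywhere), assuming the function above is in scope:

nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert max_subarray_sum(nums) == 6                                      # the subarray [4, -1, 2, 1]
assert max_subarray_sum([-3, -1, -2]) == -1                             # all-negative: best single element
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # the empty subarray wins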
"""simple docstring"""
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    upper_limit = n + 1 # maximum limit
    for a in range(2, upper_limit ):
        for b in range(2, upper_limit ):
            current_pow = a**b # calculates the current power
            collect_powers.add(current_pow ) # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 205 |
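A quick check of the distinct-powers count above, assuming solution is in scope: for 2 <= a, b <= 5 the sequence a**b contains exactly 15 distinct terms, which an independent set comprehension confirms:

assert solution(5) == len({a**b for a in range(2, 6) for b in range(2, 6)}) == 15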
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_A = pytest.mark.integration
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _UpperCamelCase ( self ) -> List[Any]:
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {'vecs': i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def _UpperCamelCase ( self ) -> Tuple:
import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def _UpperCamelCase ( self ) -> int:
        import faiss
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def _UpperCamelCase ( self ) -> Any:
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def _UpperCamelCase ( self ) -> Dict:
import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def _UpperCamelCase ( self ) -> Any:
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _UpperCamelCase ( self ) -> Any:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = F"""mock://{index_name}"""
    index.save(path, storage_options=mockfs.storage_options )
    index = FaissIndex.load(path, storage_options=mockfs.storage_options )
    query = np.zeros(5, dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
# batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 205 | 1 |
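The FaissIndex tested above is a thin wrapper over faiss itself. A minimal sketch of the underlying API (assuming the faiss package is installed): an inner-product flat index over float32 vectors, queried with a batch of shape (n_queries, dim).

import numpy as np
import faiss

d = 5
index = faiss.IndexFlatIP(d)              # flat index with inner-product metric
index.add(np.eye(d, dtype=np.float32))    # one unit basis vector per row
query = np.zeros((1, d), dtype=np.float32)
query[0, 1] = 1
scores, indices = index.search(query, 1)  # top-1 neighbour per query
assert indices[0][0] == 1                 # basis vector 1 has the largest inner product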
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
    def __init__( self):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict())
            index_file = os.path.join(tmp_dir , """index.json""")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
# TODO: add tests on the fact weights are properly loaded
def snake_case_ ( self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , """weight""" , tmp_dir , {})
                weight_file = os.path.join(tmp_dir , """weight.dat""")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index , {"""weight""": {"""shape""": [2, 3], """dtype""": str(dtype).split(""".""")[1]}})
                new_weight = load_offloaded_weight(weight_file , index["""weight"""])
                self.assertTrue(torch.equal(weight , new_weight))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ModelForTest()
__SCREAMING_SNAKE_CASE = model.state_dict()
__SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """linear2""" not in k}
__SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key]))
__SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """weight""" in k}
__SCREAMING_SNAKE_CASE = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__)
# Duplicates are removed
__SCREAMING_SNAKE_CASE = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key]))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
__SCREAMING_SNAKE_CASE = extract_submodules_state_dict(lowerCAmelCase__ , ["""a.1""", """a.2"""])
self.assertDictEqual(lowerCAmelCase__ , {"""a.1""": 0, """a.2""": 2})
__SCREAMING_SNAKE_CASE = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
__SCREAMING_SNAKE_CASE = extract_submodules_state_dict(lowerCAmelCase__ , ["""a.1""", """a.2"""])
self.assertDictEqual(lowerCAmelCase__ , {"""a.1.a""": 0, """a.2.a""": 2})
| 100 |
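The offload helpers exercised above persist each tensor as a raw .dat file plus a small index recording its shape and dtype, so weights can be memory-mapped back lazily. A simplified NumPy-only sketch of that idea (my own helper names, not the accelerate API):

import os
import tempfile

import numpy as np

def offload(array: np.ndarray, name: str, folder: str, index: dict) -> dict:
    # Raw bytes on disk; shape/dtype metadata lives in the index.
    array.tofile(os.path.join(folder, f"{name}.dat"))
    index[name] = {"shape": list(array.shape), "dtype": str(array.dtype)}
    return index

def load_offloaded(name: str, folder: str, index: dict) -> np.ndarray:
    entry = index[name]
    return np.memmap(os.path.join(folder, f"{name}.dat"),
                     dtype=entry["dtype"], shape=tuple(entry["shape"]), mode="r")

with tempfile.TemporaryDirectory() as tmp_dir:
    idx = offload(np.random.randn(2, 3).astype(np.float32), "weight", tmp_dir, {})
    restored = load_offloaded("weight", tmp_dir, idx)
    assert restored.shape == (2, 3)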
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ = logging.get_logger(__name__)
@add_end_docstrings(
__a , R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__):
if self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
__SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__)
else:
raise ValueError("""Unsupported framework""")
return masked_index
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.get_masked_index(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def snake_case_ ( self , lowerCAmelCase__):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__):
if return_tensors is None:
__SCREAMING_SNAKE_CASE = self.framework
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__)
self.ensure_exactly_one_mask_token(lowerCAmelCase__)
return model_inputs
    def snake_case_ ( self , model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["""input_ids"""] = model_inputs["""input_ids"""]
        return model_outputs
    def snake_case_ ( self , model_outputs , top_k=5 , target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["""input_ids"""][0]
        outputs = model_outputs["""logits"""]
if self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
__SCREAMING_SNAKE_CASE = outputs.numpy()
__SCREAMING_SNAKE_CASE = outputs[0, masked_index, :]
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCAmelCase__ , axis=-1)
if target_ids is not None:
__SCREAMING_SNAKE_CASE = tf.gather_nd(tf.squeeze(lowerCAmelCase__ , 0) , target_ids.reshape(-1 , 1))
__SCREAMING_SNAKE_CASE = tf.expand_dims(lowerCAmelCase__ , 0)
__SCREAMING_SNAKE_CASE = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = topk.values.numpy(), topk.indices.numpy()
else:
__SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
__SCREAMING_SNAKE_CASE = outputs[0, masked_index, :]
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1)
if target_ids is not None:
__SCREAMING_SNAKE_CASE = probs[..., target_ids]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = probs.topk(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
__SCREAMING_SNAKE_CASE = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
__SCREAMING_SNAKE_CASE = input_ids.numpy().copy()
if target_ids is not None:
__SCREAMING_SNAKE_CASE = target_ids[p].tolist()
__SCREAMING_SNAKE_CASE = p
# Filter padding out:
__SCREAMING_SNAKE_CASE = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p]), """sequence""": sequence}
row.append(lowerCAmelCase__)
result.append(lowerCAmelCase__)
if single_mask:
return result[0]
return result
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = [targets]
try:
__SCREAMING_SNAKE_CASE = self.tokenizer.get_vocab()
except Exception:
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = []
for target in targets:
__SCREAMING_SNAKE_CASE = vocab.get(lowerCAmelCase__ , lowerCAmelCase__)
if id_ is None:
__SCREAMING_SNAKE_CASE = self.tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , max_length=1 , truncation=lowerCAmelCase__ , )["""input_ids"""]
if len(lowerCAmelCase__) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""")
continue
__SCREAMING_SNAKE_CASE = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
target_ids.append(id_)
__SCREAMING_SNAKE_CASE = list(set(lowerCAmelCase__))
if len(lowerCAmelCase__) == 0:
raise ValueError("""At least one target must be provided when passed.""")
__SCREAMING_SNAKE_CASE = np.array(lowerCAmelCase__)
return target_ids
def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None):
__SCREAMING_SNAKE_CASE = {}
if targets is not None:
__SCREAMING_SNAKE_CASE = self.get_target_ids(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = target_ids
if top_k is not None:
__SCREAMING_SNAKE_CASE = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""")
return {}, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) == 1:
return outputs[0]
return outputs
| 100 | 1 |
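From the caller's side, the pipeline implemented above is used like this (the checkpoint name is an example, any fill-mask-capable model works; RoBERTa-style models use <mask> as the mask token):

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
predictions = fill_mask("Paris is the <mask> of France.", top_k=2)
for p in predictions:
    # each prediction carries score, token, token_str and the filled-in sequence
    print(p["token_str"], round(p["score"], 3))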
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : int, __A : Union[str, Any], __A : Union[str, Any]=1_3, __A : int=6_4, __A : List[Any]=2, __A : Tuple=3, __A : Dict=True, __A : Optional[int]=True, __A : int=3_2, __A : Tuple=5, __A : Union[str, Any]=4, __A : int=3_7, __A : Union[str, Any]="gelu", __A : Optional[int]=0.1, __A : Optional[Any]=0.1, __A : Optional[Any]=1_0, __A : Any=0.0_2, __A : Optional[Any]=[1, 1_6, 4, 4], __A : Union[str, Any]=None, ):
UpperCAmelCase : int = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : Any = image_size
UpperCAmelCase : int = patch_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Any = is_training
UpperCAmelCase : Tuple = use_labels
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : int = hidden_act
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Any = scope
UpperCAmelCase : Any = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : Dict = (self.image_size // 3_2) ** 2
UpperCAmelCase : Optional[Any] = num_patches + 1
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : List[Any], __A : str, __A : Optional[Any], __A : int ):
UpperCAmelCase : List[str] = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str], __A : Optional[int], __A : Union[str, Any], __A : List[Any] ):
UpperCAmelCase : Any = self.type_sequence_label_size
UpperCAmelCase : str = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : int = ViTHybridModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(__A )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Optional[Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : Optional[int] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Dict = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> List[Any]:
UpperCAmelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Dict ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : int ):
UpperCAmelCase : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Optional[Any] = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Any = model(**__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Optional[Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : Optional[int] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : List[Any] = model(**__A )
UpperCAmelCase : Dict = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Optional[Any] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''' )
| 356 |
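For comparison with the hand-rolled integration test above, the same checkpoint can be driven through the generic image-classification pipeline, which bundles the processor and the forward pass (a sketch; the image URL is the COCO sample used throughout these tests):

from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-hybrid-base-bit-384")
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
print(predictions[0]["label"])  # expected to be a cat-like ImageNet class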
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : Any, __A : Optional[Any], __A : Optional[Any]=3, __A : Union[str, Any]=3_2, __A : Optional[int]=3, __A : str=1_0, __A : Union[str, Any]=[8, 1_6, 3_2, 6_4], __A : List[str]=[1, 1, 2, 1], __A : Dict=True, __A : List[Any]=True, __A : int="relu", __A : Optional[Any]=3, __A : Any=None, __A : Any=["stage2", "stage3", "stage4"], __A : Optional[int]=[2, 3, 4], __A : Any=1, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : Optional[int] = image_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : Optional[Any] = embeddings_size
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : Any = depths
UpperCAmelCase : int = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Any = num_labels
UpperCAmelCase : List[Any] = scope
UpperCAmelCase : int = len(__A )
UpperCAmelCase : Union[str, Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Tuple = num_groups
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Tuple ):
return BitConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
def __magic_name__ ( self : int, __A : str, __A : List[Any], __A : Any ):
UpperCAmelCase : Optional[int] = BitModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
def __magic_name__ ( self : List[Any], __A : Any, __A : Union[str, Any], __A : Dict ):
UpperCAmelCase : Optional[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = BitForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[int], __A : Any, __A : List[str], __A : str ):
UpperCAmelCase : Any = BitBackbone(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : List[str] = None
UpperCAmelCase : Optional[Any] = BitBackbone(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def __magic_name__ ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = BitModelTester(self )
UpperCAmelCase : Any = ConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Union[str, Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __magic_name__ ( self : List[Any] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __magic_name__ ( self : Dict ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[str] = [*signature.parameters.keys()]
UpperCAmelCase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(config=__A )
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
def __magic_name__ ( self : Dict ):
def check_hidden_states_output(__A : List[Any], __A : Optional[int], __A : int ):
UpperCAmelCase : int = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Any = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Any = self.model_tester.num_stages
self.assertEqual(len(__A ), expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : int = layer_type
UpperCAmelCase : List[Any] = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Dict = True
check_hidden_states_output(__A, __A, __A )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __magic_name__ ( self : List[str] ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __magic_name__ ( self : List[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[str] = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
UpperCAmelCase : str = self.default_image_processor
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**__A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : int = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase = BitConfig
UpperCamelCase = False
def __magic_name__ ( self : int ):
UpperCAmelCase : int = BitModelTester(self )
| 99 | 0 |
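The initialization check in the Bit tests above relies on a PyTorch guarantee worth stating explicitly: freshly constructed BatchNorm and GroupNorm layers have affine weight 1 and bias 0. A tiny standalone confirmation:

import torch
from torch import nn

for module in (nn.BatchNorm2d(8), nn.GroupNorm(2, 8)):
    assert torch.all(module.weight == 1)
    assert torch.all(module.bias == 0)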
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_a = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_a = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word ) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __a , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , **__a , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , **__a , )
_UpperCamelCase = vocab_file
_UpperCamelCase = merges_file
_UpperCamelCase = {}
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = 3
self.add_from_file(__a)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(__a , encoding='''utf-8''') as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''')[:-1]
_UpperCamelCase = [tuple(merge.split()[:-1]) for merge in merges]
_UpperCamelCase = dict(zip(__a , range(len(__a))))
_UpperCamelCase = {}
    def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def UpperCAmelCase ( self , __a , __a = None , __a = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a)
if token_ids_a is None:
return [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1, 1] + ([0] * len(__a)) + [1]
def UpperCAmelCase ( self , __a , __a = None) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return len(self.encoder)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens

    def _convert_token_to_id( self , token ):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens ):
        """Convert a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )

        return out_vocab_file, out_merge_file
    def add_from_file( self , f ) -> None:
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 194 |
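A minimal, self-contained sketch of the BPE merge loop implemented by the tokenizer above, run on a toy merge table (the ranks and the "hello" input are illustrative, not PhoBERT's real vocabulary):

def get_pairs(word):
    # all adjacent symbol pairs in a word (a tuple of symbols)
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe(token, bpe_ranks):
    # start from characters, with an end-of-word marker on the last one
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while len(word) > 1:
        pairs = get_pairs(word)
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no more known merges apply
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return "@@ ".join(word)[:-4]  # "@@ " joins subwords; [:-4] drops "</w>"

ranks = {("h", "e"): 0, ("he", "l"): 1, ("l", "o</w>"): 2}
print(bpe("hello", ranks))  # -> "hel@@ lo"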
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self , predictions , references , sample_weight=None ):
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
| 194 | 1 |
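A quick end-to-end check of the wrapped function, mirroring Example 1 in the docstring above (sklearn is the only dependency):

from sklearn.metrics import matthews_corrcoef

references = [1, 3, 2, 0, 3, 2]
predictions = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(references, predictions), 2))  # 0.54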
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)

def skip( test_case ):
    return unittest.skip("Test was skipped" )(test_case )

def slow( test_case ):
    return unittest.skipUnless(_run_slow_tests , "test is slow" )(test_case )

def require_cpu( test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(test_case )

def require_cuda( test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(test_case )

def require_xpu( test_case ):
    return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(test_case )

def require_mps( test_case ):
    return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(test_case )

def require_huggingface_suite( test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(test_case )

def require_bnb( test_case ):
    return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(test_case )

def require_tpu( test_case ):
    return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(test_case )

def require_single_gpu( test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(test_case )

def require_single_xpu( test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(test_case )

def require_multi_gpu( test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(test_case )

def require_multi_xpu( test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(test_case )

def require_safetensors( test_case ):
    return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(test_case )

def require_deepspeed( test_case ):
    return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(test_case )

def require_fsdp( test_case ):
    return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(test_case )

def require_torch_min_version( test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version(">=" , version ) , f"test requires torch version >= {version}" )(test_case )

def require_tensorboard( test_case ):
    return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(test_case )

def require_wandb( test_case ):
    return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(test_case )

def require_comet_ml( test_case ):
    return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(test_case )

_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)

def require_trackers( test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(test_case )
class TempDirTestCase(unittest.TestCase ):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls ):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def setUp(self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )

class AccelerateTestCase(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()

class MockingTestCase(unittest.TestCase ):
    def setUp(self , mocks=None ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors( tensor ):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}" )

    return result
class SubprocessCallException(Exception ):
    pass

def run_command( command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , "decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 362 |
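The flag-plus-decorator pattern above reduces to a few lines; a minimal sketch with an illustrative MY_FLAG environment variable and require_my_flag name (the real module reads RUN_SLOW, as shown):

import os
import unittest
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    value = os.environ.get(key)
    return default if value is None else bool(strtobool(value))

_run_my_tests = parse_flag_from_env("MY_FLAG", default=False)

def require_my_flag(test_case):
    # skip the wrapped test unless MY_FLAG=yes was exported
    return unittest.skipUnless(_run_my_tests, "test requires MY_FLAG=yes")(test_case)

class Demo(unittest.TestCase):
    @require_my_flag
    def test_gated(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()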
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = [torch.ones((1, 3, 5, 5) )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = processor.post_process_masks(
UpperCamelCase , torch.tensor(UpperCamelCase ) , torch.tensor(UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(UpperCamelCase ):
A__ = processor.post_process_masks(UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) )
@require_vision
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[int] , **UpperCamelCase: str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = processor.post_process_masks(
UpperCamelCase , tf.convert_to_tensor(UpperCamelCase ) , tf.convert_to_tensor(UpperCamelCase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
UpperCamelCase , np.array(UpperCamelCase ) , np.array(UpperCamelCase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Tuple , **UpperCamelCase: Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
A__ = [tf.convert_to_tensor(UpperCamelCase )]
A__ = [torch.tensor(UpperCamelCase )]
A__ = [[17_64, 26_46]]
A__ = [[6_83, 10_24]]
A__ = processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""tf""" )
A__ = processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
A__ = image_processor(UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
A__ = processor(images=UpperCamelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase ) )
| 69 | 0 |
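The core round-trip these tests exercise, as a standalone sketch (assumes a transformers install with SAM support; it mirrors the save/load test above rather than adding anything new):

import tempfile

from transformers import SamImageProcessor, SamProcessor

with tempfile.TemporaryDirectory() as tmpdir:
    processor = SamProcessor(image_processor=SamImageProcessor())
    processor.save_pretrained(tmpdir)
    reloaded = SamProcessor.from_pretrained(tmpdir)
    # the reloaded processor should serialize to the identical config
    assert processor.image_processor.to_json_string() == reloaded.image_processor.to_json_string()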
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd( w ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict ):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20( text_enc_dict ):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            k_pre = k[: -len(".q_proj.weight" )]
            k_code = k[-len("q_proj.weight" )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            k_pre = k[: -len(".q_proj.bias" )]
            k_code = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors )

    return new_state_dict
def convert_text_enc_state_dict( text_enc_dict ):
return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 98 |
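The whole converter is driven by (stable-diffusion, diffusers) substring pairs applied to state-dict keys; a toy sketch of that mapping idea on fake tensors (the keys and pairs here are illustrative):

import torch

conversion_map = [("in_layers.2", "conv1"), ("out_layers.0", "norm2")]  # (sd, hf)

hf_state_dict = {
    "down_blocks.0.resnets.0.conv1.weight": torch.zeros(4),
    "down_blocks.0.resnets.0.norm2.weight": torch.zeros(4),
}

def to_sd_keys(state_dict):
    # start with the identity mapping, then rewrite each key piecewise
    mapping = {k: k for k in state_dict}
    for k, v in mapping.items():
        for sd_part, hf_part in conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    return {v: state_dict[k] for k, v in mapping.items()}

print(sorted(to_sd_keys(hf_state_dict)))
# ['down_blocks.0.resnets.0.in_layers.2.weight', 'down_blocks.0.resnets.0.out_layers.0.weight']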
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """
    Return num! recursively, memoized with functools.lru_cache.

    >>> factorial(0)
    1
    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3

class FailedTestError(RuntimeError):
    pass

def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}

def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
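| 362 |
What split_dataset_by_node does, shown outside of a distributed launch (a minimal sketch; exactly which examples each rank receives is an implementation detail of the datasets library):

from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(10))})
for rank in range(3):
    shard = split_dataset_by_node(ds, rank=rank, world_size=3)
    print(rank, shard["i"])  # each rank sees a disjoint subset covering ds once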
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a

def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 206 | 0 |
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact

def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number //= 10  # Removing the last_digit from the given number
    return sum_of_digits

def solution(num: int = 100) -> int:
    """Return the digit sum of num!."""
    result = split_and_add(factorial(num))
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 40 |
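The same digit sum can be cross-checked against the standard library in one line (648 is the known answer for 100!):

from math import factorial

print(sum(int(digit) for digit in str(factorial(100))))  # 648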
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 0 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 147 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
def __init__( self : Any , lowercase : int , lowercase : Union[str, Any]=13 , lowercase : List[str]=7 , lowercase : List[str]=True , lowercase : int=True , lowercase : Tuple=True , lowercase : int=True , lowercase : List[Any]=99 , lowercase : Optional[int]=32 , lowercase : Dict=5 , lowercase : Optional[int]=4 , lowercase : List[str]=37 , lowercase : Tuple="gelu" , lowercase : List[Any]=0.1 , lowercase : Tuple=0.1 , lowercase : List[str]=512 , lowercase : str=16 , lowercase : Tuple=2 , lowercase : List[Any]=0.02 , lowercase : Dict=False , lowercase : Dict=True , lowercase : int="None" , lowercase : Optional[Any]=3 , lowercase : Dict=4 , lowercase : List[Any]=None , ):
"""simple docstring"""
lowercase_ :int = parent
lowercase_ :str = batch_size
lowercase_ :Tuple = seq_length
lowercase_ :Union[str, Any] = is_training
lowercase_ :Dict = use_input_mask
lowercase_ :Any = use_token_type_ids
lowercase_ :Tuple = use_labels
lowercase_ :Dict = vocab_size
lowercase_ :Tuple = hidden_size
lowercase_ :Union[str, Any] = num_hidden_layers
lowercase_ :int = num_attention_heads
lowercase_ :List[Any] = intermediate_size
lowercase_ :Tuple = hidden_act
lowercase_ :str = hidden_dropout_prob
lowercase_ :Any = attention_probs_dropout_prob
lowercase_ :List[Any] = max_position_embeddings
lowercase_ :Union[str, Any] = type_vocab_size
lowercase_ :Union[str, Any] = type_sequence_label_size
lowercase_ :Any = initializer_range
lowercase_ :List[Any] = num_labels
lowercase_ :str = num_choices
lowercase_ :Optional[Any] = relative_attention
lowercase_ :Tuple = position_biased_input
lowercase_ :Union[str, Any] = pos_att_type
lowercase_ :Tuple = scope
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :Union[str, Any] = None
if self.use_input_mask:
lowercase_ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase_ :List[Any] = None
if self.use_token_type_ids:
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ :str = None
lowercase_ :Union[str, Any] = None
lowercase_ :List[str] = None
if self.use_labels:
lowercase_ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ :str = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ :Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Dict = self.get_config()
lowercase_ :Optional[Any] = 300
return config
def lowercase__ ( self : Optional[Any] , lowercase : Dict ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase__ ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Optional[Any] , lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :str = DebertaModel(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )[0]
lowercase_ :Union[str, Any] = model(lowercase , token_type_ids=lowercase )[0]
lowercase_ :Dict = model(lowercase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase__ ( self : Dict , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :Dict = DebertaForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : List[Any] , lowercase : int , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Dict = self.num_labels
lowercase_ :int = DebertaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase )
def lowercase__ ( self : List[Any] , lowercase : List[str] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : int ):
"""simple docstring"""
lowercase_ :List[str] = self.num_labels
lowercase_ :Optional[int] = DebertaForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , lowercase : Tuple , lowercase : Any , lowercase : List[Any] , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Any = DebertaForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :List[Any] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ :Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowercase_ :Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :List[Any] = DebertaModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase )
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase )
@slow
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Tuple = DebertaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def lowercase__ ( self : Dict ):
"""simple docstring"""
pass
@slow
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = DebertaModel.from_pretrained("microsoft/deberta-base" )
lowercase_ :Dict = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowercase_ :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase )[0]
# compare the actual values for a slice.
lowercase_ :List[Any] = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
| 147 | 1 |
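A standalone version of the slow integration check at the end of the test file above (downloads the microsoft/deberta-base weights; the input ids and compared slice are the ones asserted in the test):

import torch
from transformers import DebertaModel

model = DebertaModel.from_pretrained("microsoft/deberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output[:, 1:4, 1:4])  # expected ≈ [[[-0.5986, -0.8055, -0.8462], ...]]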
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-50-one-to-many-mmt""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer ):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs=None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def vocab_size(self ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a (self , _lowerCamelCase ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ : Tuple = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : int = """"""
UpperCAmelCase__ : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCamelCase ) + token
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
UpperCAmelCase__ : Dict = False
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ : Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
UpperCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : int = [1] * len(self.prefix_tokens )
UpperCAmelCase__ : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase__ : str = src_lang
UpperCAmelCase__ : List[Any] = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Tuple = self.convert_tokens_to_ids(_lowerCamelCase )
UpperCAmelCase__ : Dict = tgt_lang_id
return inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = "en_XX" , _lowerCamelCase = None , _lowerCamelCase = "ro_RO" , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : str = src_lang
UpperCAmelCase__ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a (self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.lang_code_to_id[src_lang]
UpperCAmelCase__ : Dict = [self.cur_lang_code_id]
UpperCAmelCase__ : Dict = [self.eos_token_id]
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.lang_code_to_id[tgt_lang]
UpperCAmelCase__ : str = [self.cur_lang_code_id]
UpperCAmelCase__ : List[str] = [self.eos_token_id]
| 171 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCAmelCase__ : List[Any] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCAmelCase__ : int = text_generator("""This is a test""" , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
] , )
UpperCAmelCase__ : Optional[int] = text_generator.model.config.eos_token_id
UpperCAmelCase__ : Any = """<pad>"""
UpperCAmelCase__ : Any = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , )
self.assertEqual(
_lowerCamelCase , [
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
] , )
@require_tf
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCAmelCase__ : Dict = text_generator(["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = TextGenerationPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase )
return text_generator, ["This is a test", "Another test"]
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """Hello I believe in"""
UpperCAmelCase__ : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
UpperCAmelCase__ : int = text_generator(_lowerCamelCase , stop_sequence=""" fe""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = text_generator.model
UpperCAmelCase__ : Union[str, Any] = text_generator.tokenizer
UpperCAmelCase__ : Any = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : List[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : int = pipeline(task="""text-generation""" , model=_lowerCamelCase , tokenizer=_lowerCamelCase , return_full_text=_lowerCamelCase )
UpperCAmelCase__ : Dict = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : Optional[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCAmelCase__ : Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : List[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_text=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Optional[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Any = text_generator("""test""" , return_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCAmelCase__ : Dict = text_generator("""""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCAmelCase__ : str = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling
# long generation with only max_length would require fancy
# calculation, so these tests are dismissed for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCAmelCase__ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCAmelCase__ : str = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Even the "hole" strategy cannot work once max_new_tokens alone exceeds the model's maximum length
with self.assertRaises(_lowerCamelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
UpperCAmelCase__ : str = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : List[str] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
UpperCAmelCase__ : int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : Any = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCAmelCase__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCAmelCase__ : Optional[int] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=_lowerCamelCase , top_p=0.5 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = """Hello world"""
UpperCAmelCase__ : str = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
UpperCAmelCase__ : Any = logging.get_logger("""transformers.generation.tf_utils""" )
else:
UpperCAmelCase__ : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" )
UpperCAmelCase__ : Optional[int] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : List[str] = text_generator(_lowerCamelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowerCamelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase , max_new_tokens=1 )
self.assertNotIn(_lowerCamelCase , cl.out )
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Optional[Any] = text_generator(_lowerCamelCase , max_length=10 )
self.assertNotIn(_lowerCamelCase , cl.out )
| 171 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 288 |
from math import pow
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case , ) -> tuple[int, int]:
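# Backtracking helper: needed_sum is the target, power the fixed exponent,
# current_number the next base to try, current_sum the running total of powers,
# and solutions_count the number of representations found so far.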
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowercase__: int = int(pow(snake_case , snake_case ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowercase__ , lowercase__: Any = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowercase__ , lowercase__: Optional[int] = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
return current_sum, solutions_count
def snake_case_ ( snake_case , snake_case ) -> int:
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(snake_case , snake_case , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Any = embedding_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) ->Optional[int]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = MegatronBertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = MegatronBertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = MegatronBertForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = MegatronBertForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Any = MegatronBertForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = MegatronBertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : str = MegatronBertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = MegatronBertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : str = MegatronBertForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) = config_and_inputs
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Any = True
# test_resize_embeddings = False
__SCREAMING_SNAKE_CASE : Dict = False
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : int = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCamelCase )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return torch.tensor(
a__ , dtype=torch.long , device=a__ , )
a__ : Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = MegatronBertModel.from_pretrained(_lowerCamelCase )
model.to(_lowerCamelCase )
model.half()
SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE : int = output[0, ii, jj]
SCREAMING_SNAKE_CASE : Tuple = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE : Any = '''ii={} jj={} a={} b={}'''.format(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(math.isclose(_lowerCamelCase , _lowerCamelCase , rel_tol=_lowerCamelCase , abs_tol=_lowerCamelCase ) , msg=_lowerCamelCase )
| 313 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
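# timesteps is now a 1-D array, so the sinusoidal projection below yields one embedding per batch element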
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
| 313 | 1 |
from __future__ import annotations
class __magic_name__ :
def __init__( self : Tuple , lowerCamelCase__ : Tuple=None ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = data
UpperCamelCase__ : List[str] = None
def __repr__( self : str ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : List[str] = self
while temp:
string_rep.append(F"{temp.data}" )
UpperCamelCase__ : Dict = temp.next
return "->".join(lowerCamelCase__ )
def _a ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if not elements_list:
raise Exception('''The Elements List is empty''' )
UpperCamelCase__ : Dict = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ : Optional[int] = Node(elements_list[i] )
UpperCamelCase__ : Dict = current.next
return head
def _a ( SCREAMING_SNAKE_CASE : Node ):
"""simple docstring"""
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def _a ( ):
"""simple docstring"""
from doctest import testmod
testmod()
UpperCamelCase__ : int = make_linked_list([14, 52, 14, 12, 43] )
print('''Linked List:''' )
print(SCREAMING_SNAKE_CASE )
print('''Elements in Reverse:''' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 51 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['''data_utils'''] = data_utils
sys.modules['''vocabulary'''] = data_utils
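# These aliases let pickle resolve classes stored under the old module paths
# (e.g. data_utils.Vocab) to their new locations.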
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as fp:
UpperCamelCase__ : str = pickle.load(SCREAMING_SNAKE_CASE , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase__ : Tuple = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
UpperCamelCase__ : List[str] = corpus.vocab.__dict__
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase__ : List[Any] = os.path.abspath(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = os.path.abspath(SCREAMING_SNAKE_CASE )
print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase__ : Any = TransfoXLConfig()
else:
UpperCamelCase__ : int = TransfoXLConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
UpperCamelCase__ : Dict = TransfoXLLMHeadModel(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = load_tf_weights_in_transfo_xl(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
UpperCamelCase__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F"Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE )}" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(F"Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE )}" )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__UpperCamelCase : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
_SCREAMING_SNAKE_CASE ,R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' ,)
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __lowercase ( self : Optional[Any] , lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
lowerCAmelCase_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase_ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCamelCase )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def __lowercase ( self : Tuple , lowerCamelCase : GenericTensor ) -> np.ndarray:
lowerCAmelCase_ : Union[str, Any] = self.get_masked_index(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __lowercase ( self : Any , lowerCamelCase : GenericTensor ) -> Any:
if isinstance(lowerCamelCase , lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCamelCase )
def __lowercase ( self : int , lowerCamelCase : Optional[int] , lowerCamelCase : Any=None , **lowerCamelCase : int ) -> Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase_ : Any = self.framework
lowerCAmelCase_ : Union[str, Any] = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase )
self.ensure_exactly_one_mask_token(lowerCamelCase )
return model_inputs
def __lowercase ( self : Optional[int] , lowerCamelCase : List[str] ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = self.model(**lowerCamelCase )
lowerCAmelCase_ : Tuple = model_inputs["""input_ids"""]
return model_outputs
def __lowercase ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Union[str, Any]=None ) -> Tuple:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase_ : Tuple = target_ids.shape[0]
lowerCAmelCase_ : int = model_outputs["""input_ids"""][0]
lowerCAmelCase_ : List[str] = model_outputs["""logits"""]
if self.framework == "tf":
lowerCAmelCase_ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase_ : Union[str, Any] = outputs.numpy()
lowerCAmelCase_ : int = outputs[0, masked_index, :]
lowerCAmelCase_ : List[str] = stable_softmax(lowerCamelCase , axis=-1 )
if target_ids is not None:
lowerCAmelCase_ : Tuple = tf.gather_nd(tf.squeeze(lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase_ : int = tf.expand_dims(lowerCamelCase , 0 )
lowerCAmelCase_ : Optional[Any] = tf.math.top_k(lowerCamelCase , k=lowerCamelCase )
lowerCAmelCase_, lowerCAmelCase_ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase_ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase_ : List[str] = outputs[0, masked_index, :]
lowerCAmelCase_ : str = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase_ : Optional[int] = probs[..., target_ids]
lowerCAmelCase_, lowerCAmelCase_ : int = probs.topk(lowerCamelCase )
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Any = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase_ : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase_ : int = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase_ : Optional[int] = target_ids[p].tolist()
lowerCAmelCase_ : List[Any] = p
# Filter padding out:
lowerCAmelCase_ : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase_ : Union[str, Any] = self.tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
lowerCAmelCase_ : int = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(lowerCamelCase )
result.append(lowerCamelCase )
if single_mask:
return result[0]
return result
def __lowercase ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=None ) -> int:
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : Optional[int] = [targets]
try:
lowerCAmelCase_ : Any = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : Union[str, Any] = []
for target in targets:
lowerCAmelCase_ : Tuple = vocab.get(lowerCamelCase , lowerCamelCase )
if id_ is None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , max_length=1 , truncation=lowerCamelCase , )["""input_ids"""]
if len(lowerCamelCase ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
lowerCAmelCase_ : Any = input_ids[0]
# XXX: If users hit this path, tokenization becomes pretty slow,
# so the warning below lets them fix their input to get
# faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
lowerCAmelCase_ : Optional[Any] = list(set(lowerCamelCase ) )
if len(lowerCamelCase ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
lowerCAmelCase_ : List[str] = np.array(lowerCamelCase )
return target_ids
def __lowercase ( self : Optional[Any] , lowerCamelCase : int=None , lowerCamelCase : str=None ) -> List[str]:
lowerCAmelCase_ : str = {}
if targets is not None:
lowerCAmelCase_ : str = self.get_target_ids(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = target_ids
if top_k is not None:
lowerCAmelCase_ : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self : Any , lowerCamelCase : Dict , *lowerCamelCase : Dict , **lowerCamelCase : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[int] = super().__call__(lowerCamelCase , **lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) == 1:
return outputs[0]
return outputs
| 120 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : str , A__ : list[str] ):
'''simple docstring'''
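# Join the strings with the separator, e.g. "-" and ["a", "b", "c"] -> "a-b-c".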
lowerCAmelCase_ : Dict = """"""
for word_or_phrase in separated:
if not isinstance(A__ , A__ ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(A__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 120 | 1 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase : Dict = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a_ ):
for j in range(a_ ):
lowerCamelCase : Tuple = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
_A = imread('image_data/lena.jpg', 1)
# convert to its negative
_A = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 205 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'encoder-decoder'
lowercase_ = True
def __init__( self , **UpperCAmelCase_ ) -> str:
super().__init__(**UpperCAmelCase_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCamelCase : List[Any] = kwargs.pop('encoder' )
lowerCamelCase : Optional[int] = encoder_config.pop('model_type' )
lowerCamelCase : str = kwargs.pop('decoder' )
lowerCamelCase : Dict = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase : int = AutoConfig.for_model(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : List[str] = AutoConfig.for_model(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : List[str] = True
@classmethod
def _UpperCamelCase ( cls , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) -> PretrainedConfig:
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
lowerCamelCase : str = True
lowerCamelCase : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
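# Serialize this config together with the nested encoder and decoder configs.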
lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCamelCase : Union[str, Any] = self.encoder.to_dict()
lowerCamelCase : List[Any] = self.decoder.to_dict()
lowerCamelCase : Tuple = self.__class__.model_type
return output
| 205 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """mobilenet_v1"""
def __init__( self , lowercase=3 , lowercase=224 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.9_99 , lowercase=0.02 , lowercase=0.0_01 , **lowercase , ):
super().__init__(**lowercase )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
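# depth_multiplier (a.k.a. width multiplier) scales the number of channels in every layer.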
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Any = image_size
_lowerCamelCase : str = depth_multiplier
_lowerCamelCase : Dict = min_depth
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Union[str, Any] = tf_padding
_lowerCamelCase : str = classifier_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : int = layer_norm_eps
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = version.parse("""1.11""" )
@property
def A_ ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def A_ ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def A_ ( self ):
return 1E-4
| 96 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : List[Any] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
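# The script path appears twice: the first occurrence effectively serves as argv[0]
# for xla_spawn, the second is the training script that xla_spawn launches.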
with patch.object(lowercase , 'argv' , lowercase ):
_lowerCamelCase : Dict = time()
xla_spawn.main()
_lowerCamelCase : Any = time()
_lowerCamelCase : Optional[int] = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A_ ( self ):
import xla_spawn
_lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main()
| 96 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Tuple = """pix2struct_text_model"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : str = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict=5_0_2_4_4 , UpperCamelCase__ : Union[str, Any]=7_6_8 , UpperCamelCase__ : str=6_4 , UpperCamelCase__ : Tuple=2_0_4_8 , UpperCamelCase__ : Dict=1_2 , UpperCamelCase__ : Dict=1_2 , UpperCamelCase__ : Optional[Any]=3_2 , UpperCamelCase__ : Union[str, Any]=1_2_8 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=1e-6 , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : int="gelu_new" , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : List[str] , )-> Any:
'''simple docstring'''
__lowerCAmelCase: Dict = vocab_size
__lowerCAmelCase: int = hidden_size
__lowerCAmelCase: Dict = d_kv
__lowerCAmelCase: Union[str, Any] = d_ff
__lowerCAmelCase: List[str] = num_layers
__lowerCAmelCase: Union[str, Any] = num_heads
__lowerCAmelCase: Tuple = relative_attention_num_buckets
__lowerCAmelCase: Optional[Any] = relative_attention_max_distance
__lowerCAmelCase: Dict = dropout_rate
__lowerCAmelCase: Dict = layer_norm_epsilon
__lowerCAmelCase: int = initializer_factor
__lowerCAmelCase: str = use_cache
__lowerCAmelCase: Optional[Any] = eos_token_id
__lowerCAmelCase: Optional[Any] = decoder_start_token_id
# for backwards compatibility
__lowerCAmelCase: str = dense_act_fn
super().__init__(
pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , is_decoder=UpperCamelCase__ , **UpperCamelCase__ , )
@classmethod
def lowercase_ ( cls : Any , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : str)-> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type") == "pix2struct":
__lowerCAmelCase: Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__)
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : int = """pix2struct_vision_model"""
def __init__( self : int , UpperCamelCase__ : int=7_6_8 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : Optional[Any]=2_0_4_8 , UpperCamelCase__ : List[str]=6_4 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : str=1e-6 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=1e-10 , UpperCamelCase__ : str=1.0 , UpperCamelCase__ : Optional[int]=4_0_9_6 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : Tuple=1_2_8 , **UpperCamelCase__ : str , )-> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: List[Any] = patch_embed_hidden_size
__lowerCAmelCase: List[str] = d_ff
__lowerCAmelCase: int = dropout_rate
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[Any] = num_attention_heads
__lowerCAmelCase: List[str] = initializer_range
__lowerCAmelCase: List[str] = initializer_factor
__lowerCAmelCase: Union[str, Any] = attention_dropout
__lowerCAmelCase: Union[str, Any] = layer_norm_eps
__lowerCAmelCase: Dict = dense_act_fn
__lowerCAmelCase: Dict = seq_len
__lowerCAmelCase: List[str] = relative_attention_num_buckets
__lowerCAmelCase: Union[str, Any] = relative_attention_max_distance
__lowerCAmelCase: List[Any] = d_kv
@classmethod
def lowercase_ ( cls : List[Any] , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Union[str, Any])-> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type") == "pix2struct":
__lowerCAmelCase: str = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__)
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Any = """pix2struct"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
def __init__( self : Any , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=False , UpperCamelCase__ : Any=True , **UpperCamelCase__ : int , )-> Dict:
'''simple docstring'''
super().__init__(tie_word_embeddings=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__)
if text_config is None:
__lowerCAmelCase: Any = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
if vision_config is None:
__lowerCAmelCase: Dict = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
__lowerCAmelCase: Tuple = PixaStructTextConfig(**UpperCamelCase__)
__lowerCAmelCase: Dict = PixaStructVisionConfig(**UpperCamelCase__)
__lowerCAmelCase: str = self.text_config.decoder_start_token_id
__lowerCAmelCase: Dict = self.text_config.pad_token_id
__lowerCAmelCase: Optional[Any] = self.text_config.eos_token_id
__lowerCAmelCase: Optional[Any] = initializer_factor
__lowerCAmelCase: Dict = initializer_range
__lowerCAmelCase: List[str] = self.initializer_range
__lowerCAmelCase: Dict = self.initializer_range
__lowerCAmelCase: Tuple = is_vqa
@classmethod
def lowercase_ ( cls : Union[str, Any] , UpperCamelCase__ : PixaStructTextConfig , UpperCamelCase__ : PixaStructVisionConfig , **UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> int:
'''simple docstring'''
__lowerCAmelCase: Any = copy.deepcopy(self.__dict__)
__lowerCAmelCase: str = self.text_config.to_dict()
__lowerCAmelCase: Optional[int] = self.vision_config.to_dict()
__lowerCAmelCase: Tuple = self.__class__.model_type
return output
| 108 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
__A = datasets.logging.get_logger(__name__)
__A = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__A = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__A = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence"),
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def lowercase_ ( self : Tuple , UpperCamelCase__ : Any)-> Dict:
'''simple docstring'''
if self.config_name == "default":
__lowerCAmelCase: Union[str, Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
else:
__lowerCAmelCase: Tuple = comet.load_from_checkpoint(comet.download_model(self.config_name))
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=False)-> str:
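# Build one {src, mt, ref} dict per example and score the batch with the loaded COMET checkpoint.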
'''simple docstring'''
if gpus is None:
__lowerCAmelCase: Union[str, Any] = 1 if torch.cuda.is_available() else 0
__lowerCAmelCase: Dict = {"src": sources, "mt": predictions, "ref": references}
__lowerCAmelCase: Union[str, Any] = [dict(zip(UpperCamelCase__ , UpperCamelCase__)) for t in zip(*data.values())]
__lowerCAmelCase , __lowerCAmelCase: str = self.scorer.predict(UpperCamelCase__ , gpus=UpperCamelCase__ , progress_bar=UpperCamelCase__)
return {"mean_score": mean_score, "scores": scores}
| 108 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class A_ :
def lowercase ( self : str , snake_case_ : int ):
raise NotImplementedError()
def lowercase ( self : Any ):
raise NotImplementedError()
class A_ ( lowerCAmelCase_ ):
def __init__( self : str , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , **snake_case_ : Tuple ):
_UpperCAmelCase = tokenizer
_UpperCAmelCase = skip_prompt
_UpperCAmelCase = decode_kwargs
# variables used in the streaming process
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = True
def lowercase ( self : Tuple , snake_case_ : List[str] ):
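# Receives new token ids from generate(), decodes incrementally, and emits text at word boundaries.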
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
_UpperCAmelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_UpperCAmelCase = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(snake_case_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_UpperCAmelCase = text[self.print_len :]
self.print_len += len(snake_case_ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_UpperCAmelCase = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(snake_case_ )
self.on_finalized_text(snake_case_ )
def lowercase ( self : Optional[int] ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_UpperCAmelCase = text[self.print_len :]
_UpperCAmelCase = []
_UpperCAmelCase = 0
else:
_UpperCAmelCase = ""
_UpperCAmelCase = True
self.on_finalized_text(snake_case_ , stream_end=snake_case_ )
def lowercase ( self : Any , snake_case_ : str , snake_case_ : bool = False ):
print(snake_case_ , flush=snake_case_ , end="" if not stream_end else None )
def lowercase ( self : Optional[Any] , snake_case_ : Tuple ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class A_ ( lowerCAmelCase_ ):
def __init__( self : List[Any] , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , snake_case_ : Optional[float] = None , **snake_case_ : Union[str, Any] ):
super().__init__(snake_case_ , snake_case_ , **snake_case_ )
_UpperCAmelCase = Queue()
_UpperCAmelCase = None
_UpperCAmelCase = timeout
def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : bool = False ):
self.text_queue.put(snake_case_ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : str ):
return self
def lowercase ( self : Any ):
_UpperCAmelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 22 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = _distribute_shards(**__lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def UpperCAmelCase_ ( __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : int ) -> str:
'''simple docstring'''
_UpperCAmelCase = _split_gen_kwargs(__lowercase , __lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> List[Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__lowercase ):
_number_of_shards_in_gen_kwargs(__lowercase )
else:
_UpperCAmelCase = _number_of_shards_in_gen_kwargs(__lowercase )
assert out == expected
| 22 | 1 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase( _a ):
pass
class _lowerCamelCase:
def __init__( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Any = data
_lowercase : Node | None = None
def __iter__( self) -> Tuple:
"""simple docstring"""
_lowercase : str = self
_lowercase : int = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase)
yield node.data
_lowercase : List[str] = node.next_node
@property
def UpperCamelCase ( self) -> bool:
"""simple docstring"""
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = Node(1)
SCREAMING_SNAKE_CASE : int = Node(2)
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(3)
SCREAMING_SNAKE_CASE : List[str] = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE : Optional[int] = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE : List[str] = Node(5)
SCREAMING_SNAKE_CASE : Any = Node(6)
SCREAMING_SNAKE_CASE : str = Node(5)
SCREAMING_SNAKE_CASE : Dict = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE : List[str] = Node(1)
print(root_node.has_loop) # False
| 354 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowercase : List[str] = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase)
@torch.no_grad()
def __call__( self, lowerCamelCase = 1, lowerCamelCase = None, lowerCamelCase = 0.0, lowerCamelCase = 50, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size, lowerCamelCase):
_lowercase : Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowercase : Union[str, Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCamelCase)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_lowercase : str = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowerCamelCase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
_lowercase : Union[str, Any] = self.unet(lowerCamelCase, lowerCamelCase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(
lowerCamelCase, lowerCamelCase, lowerCamelCase, eta=lowerCamelCase, use_clipped_model_output=lowerCamelCase, generator=lowerCamelCase).prev_sample
_lowercase : Any = (image / 2 + 0.5).clamp(0, 1)
_lowercase : str = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : Optional[int] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase)
| 84 | 0 |
'''simple docstring'''
from math import factorial
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(lowerCamelCase_ ) // (factorial(lowerCamelCase_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = graph
# mapping node to its parent in resulting breadth first tree
SCREAMING_SNAKE_CASE : dict[str, str | None] = {}
SCREAMING_SNAKE_CASE : List[str] = source_vertex
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex}
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
SCREAMING_SNAKE_CASE : str = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vertex
queue.append(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
'''simple docstring'''
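# Follow parent links recursively from the target back to the source to rebuild the path.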
if target_vertex == self.source_vertex:
return self.source_vertex
SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ )
if target_vertex_parent is None:
SCREAMING_SNAKE_CASE : Tuple = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCamelCase_ )
return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowercase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Any =VOCAB_FILES_NAMES
UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Any =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str =RealmTokenizer
def __init__( self : Union[str, Any], __lowercase : Optional[Any]=None, __lowercase : List[str]=None, __lowercase : Optional[Any]=True, __lowercase : Optional[Any]="[UNK]", __lowercase : int="[SEP]", __lowercase : Optional[int]="[PAD]", __lowercase : Optional[int]="[CLS]", __lowercase : Any="[MASK]", __lowercase : Union[str, Any]=True, __lowercase : List[Any]=None, **__lowercase : int, ):
super().__init__(
__lowercase, tokenizer_file=__lowercase, do_lower_case=__lowercase, unk_token=__lowercase, sep_token=__lowercase, pad_token=__lowercase, cls_token=__lowercase, mask_token=__lowercase, tokenize_chinese_chars=__lowercase, strip_accents=__lowercase, **__lowercase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase", __lowercase ) != do_lower_case
or normalizer_state.get("strip_accents", __lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars", __lowercase ) != tokenize_chinese_chars
):
lowercase__ = getattr(__lowercase, normalizer_state.pop("type" ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**__lowercase )
lowercase__ = do_lower_case
def A__ ( self : str, __lowercase : Tuple, **__lowercase : Any ):
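# Encode each candidate text, padding to max length so the per-candidate encodings can be stacked.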
lowercase__ = PaddingStrategy.MAX_LENGTH
lowercase__ = text
lowercase__ = kwargs.pop("text_pair", __lowercase )
lowercase__ = kwargs.pop("return_tensors", __lowercase )
lowercase__ = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
lowercase__ = batch_text_pair[idx]
else:
lowercase__ = None
lowercase__ = super().__call__(__lowercase, __lowercase, return_tensors=__lowercase, **__lowercase )
lowercase__ = encoded_candidates.get("input_ids" )
lowercase__ = encoded_candidates.get("attention_mask" )
lowercase__ = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
lowercase__ = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase, tensor_type=__lowercase )
def A__ ( self : str, __lowercase : str, __lowercase : Dict=None ):
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self : str, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
return tuple(__lowercase )
| 224 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
lowercase_ = parser.parse_args()
if args.model_type == "roberta":
lowercase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
lowercase_ = """roberta"""
elif args.model_type == "gpt2":
lowercase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
lowercase_ = """transformer"""
lowercase_ = model.state_dict()
lowercase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowercase_ = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowercase_ = F'{prefix}.embeddings.{w}.weight'
lowercase_ = state_dict[param_name]
for w in ["weight", "bias"]:
lowercase_ = F'{prefix}.embeddings.LayerNorm.{w}'
lowercase_ = state_dict[param_name]
# Transformer Blocks #
lowercase_ = 0
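# Copy the selected teacher layers (0, 2, 4, 7, 9, 11) into consecutive student layers.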
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowercase_ = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowercase_ = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowercase_ = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowercase_ = state_dict[F'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowercase_ = state_dict[F'lm_head.dense.{w}']
lowercase_ = state_dict[F'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowercase_ = state_dict[F'{prefix}.ln_f.{w}']
lowercase_ = state_dict["""lm_head.weight"""]
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 224 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase_ = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
lowerCAmelCase_ = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
lowerCAmelCase_ = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] ,reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : str=None ,_snake_case : Union[str, Any]=True ,_snake_case : Union[str, Any]=False ) -> Any:
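# Score each prediction against its reference; optionally bootstrap-aggregate over the corpus.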
"""simple docstring"""
if rouge_types is None:
lowercase__ : int = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowercase__ : List[str] = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ ,use_stemmer=UpperCAmelCase_ )
if use_aggregator:
lowercase__ : List[str] = scoring.BootstrapAggregator()
else:
lowercase__ : List[Any] = []
for ref, pred in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
lowercase__ : str = scorer.score(UpperCAmelCase_ ,UpperCAmelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCAmelCase_ )
else:
scores.append(UpperCAmelCase_ )
if use_aggregator:
lowercase__ : int = aggregator.aggregate()
else:
lowercase__ : Any = {}
for key in scores[0]:
lowercase__ : Optional[Any] = [score[key] for score in scores]
return result
| 16 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
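# Newton-Laplace equation: speed of sound in a fluid is c = sqrt(bulk_modulus / density).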
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 169 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__snake_case = logging.getLogger(__name__)
class lowercase__ ( _UpperCAmelCase ):
A__ : Tuple ="""summarization"""
A__ : Optional[int] =["""loss"""]
A__ : Optional[Any] =ROUGE_KEYS
A__ : str ="""rouge2"""
def __init__( self : List[str] , UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(UpperCAmelCase_ , num_labels=UpperCAmelCase_ , mode=self.mode , **UpperCAmelCase_ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / 'metrics.json'
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = defaultdict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.config.model_type
SCREAMING_SNAKE_CASE__ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
SCREAMING_SNAKE_CASE__ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE__ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE__ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE__ = get_git_info()['repo_sha']
SCREAMING_SNAKE_CASE__ = hparams.num_workers
SCREAMING_SNAKE_CASE__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE__ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE__ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE__ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE__ = self.model.config.max_length
SCREAMING_SNAKE_CASE__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A_ ( self : List[str] , UpperCAmelCase_ : Dict[str, torch.Tensor] ):
SCREAMING_SNAKE_CASE__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCAmelCase_ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
SCREAMING_SNAKE_CASE__ = True
return readable_batch
def A_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
return self.model(UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : List[int] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return lmap(str.strip , UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : dict ):
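# One training step: shift labels right for teacher forcing, then compute the
# (optionally label-smoothed) cross-entropy loss over the logits.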
SCREAMING_SNAKE_CASE__ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch['input_ids'], batch['attention_mask']
SCREAMING_SNAKE_CASE__ = batch['labels']
if isinstance(self.model , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.model._shift_right(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = shift_tokens_right(UpperCAmelCase_ , UpperCAmelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE__ = decoder_input_ids
self.save_readable_batch(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE__ = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_smoothed_nll_loss(
UpperCAmelCase_ , UpperCAmelCase_ , self.hparams.label_smoothing , ignore_index=UpperCAmelCase_ )
return (loss,)
@property
def A_ ( self : Dict ):
return self.tokenizer.pad_token_id
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
SCREAMING_SNAKE_CASE__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A_ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE__ = losses['loss']
SCREAMING_SNAKE_CASE__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
SCREAMING_SNAKE_CASE__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE__ = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE__ = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE__ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def A_ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
return calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : str , UpperCAmelCase_ : dict ):
SCREAMING_SNAKE_CASE__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE__ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=UpperCAmelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE__ = (time.time() - ta) / batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(batch['labels'] )
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = self.calc_generative_metrics(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = np.mean(lmap(UpperCAmelCase_ , UpperCAmelCase_ ) )
base_metrics.update(gen_time=UpperCAmelCase_ , gen_len=UpperCAmelCase_ , preds=UpperCAmelCase_ , target=UpperCAmelCase_ , **UpperCAmelCase_ )
return base_metrics
def A_ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Any , UpperCAmelCase_ : List[str] ):
return self.validation_epoch_end(UpperCAmelCase_ , prefix='test' )
def A_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE__ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE__ = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase_ , n_obs=UpperCAmelCase_ , max_target_length=UpperCAmelCase_ , **self.dataset_kwargs , )
return dataset
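# Training dataloader strategy: a sortish sampler groups similarly sized examples
# to reduce padding, a dynamic sampler packs batches up to max_tokens_per_batch,
# and otherwise a plain shuffled DataLoader is used.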
def A_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.get_dataset(UpperCAmelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_sortish_sampler(UpperCAmelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_sampler=UpperCAmelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
return dataloader
def A_ ( self : str ):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def A_ ( self : int ):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def A_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_ )
add_generic_args(UpperCAmelCase_ , UpperCAmelCase_ )
parser.add_argument(
'--max_source_length' , default=1024 , type=UpperCAmelCase_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=UpperCAmelCase_ , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=UpperCAmelCase_ , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=UpperCAmelCase_ , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=UpperCAmelCase_ )
parser.add_argument('--max_tokens_per_batch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument('--logger_name' , type=UpperCAmelCase_ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=UpperCAmelCase_ , default=500 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=UpperCAmelCase_ , default='summarization' , required=UpperCAmelCase_ , help='Task to fine-tune: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=UpperCAmelCase_ , default=0.0 , required=UpperCAmelCase_ )
parser.add_argument('--src_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--tgt_lang' , type=UpperCAmelCase_ , default='' , required=UpperCAmelCase_ )
parser.add_argument('--eval_beams' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ )
parser.add_argument(
'--val_metric' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=UpperCAmelCase_ , default=1 , required=UpperCAmelCase_ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
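# Translation variant: reuses the seq2seq training loop above but is scored with
# BLEU and carries source/target language codes from the hyperparameters.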
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[Any] ="""translation"""
A__ : Dict =["""loss"""]
A__ : Optional[int] =["""bleu"""]
A__ : Union[str, Any] ="""bleu"""
def __init__( self : Tuple , UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = hparams.src_lang
SCREAMING_SNAKE_CASE__ = hparams.tgt_lang
def A_ ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
return calculate_bleu(UpperCAmelCase_ , UpperCAmelCase_ )
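# Entry point: builds the summarization or translation module, picks a logger
# (wandb or default), wires up early stopping and checkpointing, trains, and
# optionally reloads the newest checkpoint for the prediction pass.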
def _lowercase ( UpperCamelCase_ , UpperCamelCase_=None ) -> SummarizationModule:
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=UpperCamelCase_ )
check_output_dir(UpperCamelCase_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE__ = SummarizationModule(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = TranslationModule(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
SCREAMING_SNAKE_CASE__ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = os.environ.get('WANDB_PROJECT' , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=UpperCamelCase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE__ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = args.val_metric == 'loss'
SCREAMING_SNAKE_CASE__ = generic_train(
UpperCamelCase_ , UpperCamelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCamelCase_ ) , early_stopping_callback=UpperCamelCase_ , logger=UpperCamelCase_ , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE__ = ''
SCREAMING_SNAKE_CASE__ = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=UpperCamelCase_ ) )
if checkpoints:
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
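# Illustrative invocation (script and flag names are assumptions, not verified defaults):
#   python finetune.py --data_dir xsum --model_name_or_path t5-small \
#       --output_dir out --task summarization --n_val 500 --do_predict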
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = pl.Trainer.add_argparse_args(parser)
__snake_case = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__snake_case = parser.parse_args()
main(args)
| 169 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
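# Fast pipeline checks for Shap-E (text-to-3D): tiny randomly initialised
# components keep these tests CPU-friendly; a slow integration test against the
# released checkpoint follows at the bottom of the file.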
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Tuple =ShapEPipeline
lowercase_ : List[Any] =['''prompt''']
lowercase_ : int =['''prompt''']
lowercase_ : Union[str, Any] =[
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase_ : Optional[int] =False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 8
@property
def A__ ( self):
lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
return tokenizer
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(A__)
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase = PriorTransformer(**A__)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**A__)
return model
def A__ ( self):
lowercase = self.dummy_prior
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=A__ ,clip_sample=A__ ,clip_sample_range=1.0 ,)
lowercase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
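# Smoke test: a single denoising step on CPU should produce 20 frames of
# 32x32 RGB output whose corner slice matches a stored reference.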
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowercase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A__ ( self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def A__ ( self):
lowercase = torch_device == '''cpu'''
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=A__ ,relax_max_difference=A__ ,)
def A__ ( self):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = 1
lowercase = 2
lowercase = self.get_dummy_inputs(A__)
for key in inputs.keys():
if key in self.batch_params:
lowercase = batch_size * [inputs[key]]
lowercase = pipe(**A__ ,num_images_per_prompt=A__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
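# Integration test: renders "a shark" with the released openai/shap-e weights
# and compares the mean pixel difference against a stored reference render.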
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''')
lowercase = ShapEPipeline.from_pretrained('''openai/shap-e''')
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = pipe(
'''a shark''' ,generator=A__ ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(A__ ,A__)
| 101 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase : bool = None
__lowerCamelCase : bool = None
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase : Tuple = datasets.Audio()
__lowerCamelCase : List[str] = "audio"
__lowerCamelCase : Optional[int] = AudioFolderConfig
__lowerCamelCase : List[str] # definition at the bottom of the script
__lowerCamelCase : Optional[int] = AudioClassification(audio_column="audio" ,label_column="label" )
_SCREAMING_SNAKE_CASE = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
_SCREAMING_SNAKE_CASE = AUDIO_EXTENSIONS
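# Illustrative use (loader name as exposed by the datasets library):
#   datasets.load_dataset("audiofolder", data_dir="path/to/clips")
# infers class labels from subdirectory names unless drop_labels is set.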
| 158 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
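# Rewrites a failing test in place: find the class, then the test method, then
# the n-th statement (at 8- or 16-space indentation) that starts like the
# corrected line, and substitute the corrected line for it.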
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
lowercase__ = f.readlines()
lowercase__ = F"""class {class_name}("""
lowercase__ = F"""{4 * " "}def {test_name}("""
lowercase__ = F"""{8 * " "}{correct_line.split()[0]}"""
lowercase__ = F"""{16 * " "}{correct_line.split()[0]}"""
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = 0
lowercase__ = 0
lowercase__ = []
for line in lines:
if line.startswith(_SCREAMING_SNAKE_CASE ):
lowercase__ = True
elif in_class and line.startswith(_SCREAMING_SNAKE_CASE ):
lowercase__ = True
elif in_class and in_func and (line.startswith(_SCREAMING_SNAKE_CASE ) or line.startswith(_SCREAMING_SNAKE_CASE )):
lowercase__ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowercase__ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowercase__ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
lowercase__ = lowercase__ = lowercase__ = lowercase__ = False
else:
new_lines.append(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for line in new_lines:
f.write(_SCREAMING_SNAKE_CASE )
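# Each record in the corrections file is "path;class_name;test_name;correct_line";
# when a file of failing tests is given, only those tests are rewritten.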
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
if fail is not None:
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
lowercase__ = {l.strip() for l in f.readlines()}
else:
lowercase__ = None
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
lowercase__ = f.readlines()
lowercase__ = defaultdict(_SCREAMING_SNAKE_CASE )
for line in correct_lines:
lowercase__ , lowercase__ , lowercase__ , lowercase__ = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
lowercase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 358 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
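# DPT-style output size: choose the scale factor that deviates least from 1 when
# keeping the aspect ratio, then snap both dimensions to the requested multiple
# (within the optional min/max bounds handled by constraint_to_multiple_of).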
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[int, int]:
def constraint_to_multiple_of(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=None ):
lowercase__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase__ = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase__ = math.ceil(val / multiple ) * multiple
return x
lowercase__ = (output_size, output_size) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else output_size
lowercase__ , lowercase__ = get_image_size(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = output_size
# determine new height and width
lowercase__ = output_height / input_height
lowercase__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase__ = scale_width
else:
# fit height
lowercase__ = scale_height
lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=_SCREAMING_SNAKE_CASE )
lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=_SCREAMING_SNAKE_CASE )
return (new_height, new_width)
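# Illustrative: with output_size=384, keep_aspect_ratio=True and multiple=32, a
# non-square image keeps its aspect ratio and both sides land on multiples of 32,
# so the result is patch-aligned rather than exactly 384x384.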
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = ['pixel_values']
def __init__( self : Any , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = False , a : int = 1 , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , )-> None:
"""simple docstring"""
super().__init__(**a )
lowercase__ = size if size is not None else {'height': 384, 'width': 384}
lowercase__ = get_size_dict(a )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : np.ndarray , a : Dict[str, int] , a : bool = False , a : int = 1 , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] , )-> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowercase__ = get_resize_output_image_size(
a , output_size=(size['height'], size['width']) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , )-> str:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , )-> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : int , a : ImageInput , a : bool = None , a : int = None , a : bool = None , a : int = None , a : PILImageResampling = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : str , )-> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(a )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : List[Tuple] = None )-> Optional[int]:
"""simple docstring"""
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(a ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(a ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=a )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 269 | 0 |
"""simple docstring"""
from __future__ import annotations
__A = list[list[int]]
# assigning initial values to the grid
__A = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__A = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
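# Classic backtracking Sudoku solver: is_safe checks the row, the column and the
# 3x3 box for a candidate digit; find_empty_location returns the next blank cell.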
def UpperCamelCase__ ( lowercase__ : Matrix , lowercase__ : int , lowercase__ : int , lowercase__ : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def UpperCamelCase__ ( lowercase__ : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
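# Try digits 1-9 in the next empty cell and recurse; reset the cell to 0 and
# backtrack when no digit leads to a solution.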
def UpperCamelCase__ ( lowercase__ : Matrix ):
if location := find_empty_location(lowercase__ ):
snake_case , snake_case = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case : Any = digit
if sudoku(lowercase__ ) is not None:
return grid
snake_case : Optional[Any] = 0
return None
def UpperCamelCase__ ( lowercase__ : Matrix ):
for row in grid:
for cell in row:
print(lowercase__ , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__A = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 148 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ):
a__ : Union[str, Any] = XLMRobertaTokenizer
a__ : Optional[int] = XLMRobertaTokenizerFast
a__ : List[str] = True
a__ : List[Any] = True
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = "<pad>"
snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1_002 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : List[str] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : Tuple = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE )
snake_case : List[str] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case : Dict = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case : Tuple = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE )
snake_case : str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Dict = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
shutil.rmtree(SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case : List[str] = tempfile.mkdtemp()
snake_case : List[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : Optional[Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
shutil.rmtree(SCREAMING_SNAKE_CASE )
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE , f.name )
snake_case : int = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE )
snake_case : Tuple = pickle.dumps(SCREAMING_SNAKE_CASE )
pickle.loads(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Dict = self.get_rust_tokenizer()
snake_case : Optional[Any] = "I was born in 92000, and this is falsé."
snake_case : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : str = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
snake_case : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = self.get_rust_tokenizer()
snake_case : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE )
snake_case : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = "Hello World!"
snake_case : Optional[Any] = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case : Dict = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 148 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A : str = logging.get_logger(__name__)
A : str = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
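# Resolve a class name such as "ViTImageProcessor" to the actual class: search
# the per-model modules first, then extra registered processors, then the main
# init (which exposes dummy objects when an optional dependency is missing).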
def a__ ( __UpperCamelCase ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE_ = model_type_to_module_name(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = importlib.import_module(F'''.{module_name}''' , "transformers.models" )
try:
return getattr(__UpperCamelCase , __UpperCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__UpperCamelCase , "__name__" , __UpperCamelCase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE_ = importlib.import_module("transformers" )
if hasattr(__UpperCamelCase , __UpperCamelCase ):
return getattr(__UpperCamelCase , __UpperCamelCase )
return None
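# Fetch the image processor configuration file for a checkpoint and return it as
# a dict; an empty dict tells the caller to fall back to the model config.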
def a__ ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ):
SCREAMING_SNAKE_CASE_ = get_file_from_repo(
__UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(__UpperCamelCase , encoding="utf-8" ) as reader:
return json.load(__UpperCamelCase )
class lowerCamelCase :
"""simple docstring"""
def __init__( self : int ) -> List[Any]:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(__magic_name__ )
def __A ( cls : List[str] , __magic_name__ : Dict , **__magic_name__ : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = kwargs.pop("config" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = kwargs.pop("trust_remote_code" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ImageProcessingMixin.get_image_processor_dict(__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = config_dict.get("image_processor_type" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
SCREAMING_SNAKE_CASE_ = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE_ = config_dict.pop("feature_extractor_type" , __magic_name__ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
SCREAMING_SNAKE_CASE_ = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
SCREAMING_SNAKE_CASE_ = config_dict["auto_map"]["AutoFeatureExtractor"]
SCREAMING_SNAKE_CASE_ = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(__magic_name__ , **__magic_name__ )
# It could be in `config.image_processor_type``
SCREAMING_SNAKE_CASE_ = getattr(__magic_name__ , "image_processor_type" , __magic_name__ )
if hasattr(__magic_name__ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE_ = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
SCREAMING_SNAKE_CASE_ = image_processor_class_from_name(__magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE_ = image_processor_class is not None or type(__magic_name__ ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE_ = resolve_trust_remote_code(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE_ = get_class_from_dynamic_module(
__magic_name__ , __magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = kwargs.pop("code_revision" , __magic_name__ )
if os.path.isdir(__magic_name__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__magic_name__ , **__magic_name__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(__magic_name__ , **__magic_name__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__magic_name__ ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE_ = IMAGE_PROCESSOR_MAPPING[type(__magic_name__ )]
return image_processor_class.from_dict(__magic_name__ , **__magic_name__ )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def __A ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] ) -> List[str]:
IMAGE_PROCESSOR_MAPPING.register(__magic_name__ , __magic_name__ )
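# Illustrative use (checkpoint name is only an example):
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")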
| 305 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
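# ViLT processor: bundles a BERT tokenizer with the ViLT image processor so a
# single call returns token ids plus pixel_values/pixel_mask for image-text pairs.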
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViltImageProcessor'''
lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self : Dict ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
@property
def __A ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
return self.image_processor
| 305 | 1 |
import math
def UpperCAmelCase_ ( __snake_case ) -> bool:
"""simple docstring"""
return math.sqrt(__snake_case ) * math.sqrt(__snake_case ) == num
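# The sqrt-based check above can be fooled by floating-point rounding for very
# large inputs; the binary search below stays in integer arithmetic, O(log n).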
def UpperCAmelCase_ ( __snake_case ) -> bool:
"""simple docstring"""
_lowercase =0
_lowercase =n
while left <= right:
_lowercase =(left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowercase =mid - 1
else:
_lowercase =mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
from typing import Any
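# Viterbi decoding for a hidden Markov model: dynamic programming over
# (state, observation) pairs stores the best path probability and a back-pointer
# to the most likely predecessor state, then walks the pointers backwards.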
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
"""simple docstring"""
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
_lowercase ={}
_lowercase ={}
for state in states_space:
_lowercase =observations_space[0]
_lowercase =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowercase =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
_lowercase =observations_space[o]
_lowercase =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
# Update probabilities and pointers dicts
_lowercase =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowercase =arg_max
# The final observation
_lowercase =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
_lowercase =arg_max
# Process pointers backwards
_lowercase =last_state
_lowercase =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
_lowercase =pointers[previous, observations_space[o]]
result.reverse()
return result
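# Illustrative call to the decoder above (classic health/fever HMM; "viterbi" is
# a stand-in for the obfuscated function name):
#   viterbi(["normal", "cold", "dizzy"], ["Healthy", "Fever"],
#           {"Healthy": 0.6, "Fever": 0.4},
#           {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#            "Fever": {"Healthy": 0.4, "Fever": 0.6}},
#           {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#            "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}})
#   -> ["Healthy", "Healthy", "Fever"]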
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 1 |
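The function above is the classic Viterbi algorithm over dict-shaped probability tables. A compact sketch under the same table layout (all names are mine; note I key the DP table by time step rather than by the observation string, since keying by observation breaks when the same observation appears twice):

```python
def viterbi(observations, states, start_p, trans_p, emit_p):
    """Return the most probable hidden-state path for the observations."""
    # prob[(s, t)]: best probability of any path that ends in state s at step t
    prob = {(s, 0): start_p[s] * emit_p[s][observations[0]] for s in states}
    back = {}  # back[(s, t)]: predecessor state on that best path
    for t in range(1, len(observations)):
        for s in states:
            best_prev = max(states, key=lambda k: prob[(k, t - 1)] * trans_p[k][s])
            prob[(s, t)] = (
                prob[(best_prev, t - 1)] * trans_p[best_prev][s] * emit_p[s][observations[t]]
            )
            back[(s, t)] = best_prev
    last = max(states, key=lambda s: prob[(s, len(observations) - 1)])
    path = [last]
    for t in range(len(observations) - 1, 0, -1):
        path.append(back[(path[-1], t)])
    return list(reversed(path))


states = ["Healthy", "Fever"]
obs = ["normal", "cold", "dizzy"]
start = {"Healthy": 0.6, "Fever": 0.4}
trans = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(obs, states, start, trans, emit))  # ['Healthy', 'Healthy', 'Fever']
```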
"""simple docstring"""
import math
A : List[str] = 1_0
A : List[str] = 7
A : List[str] = BALLS_PER_COLOUR * NUM_COLOURS
def _lowerCamelCase ( _UpperCamelCase = 20 ):
'''simple docstring'''
__lowerCAmelCase = math.comb(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _UpperCamelCase )
__lowerCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
return f"{result:.9f}"
if __name__ == "__main__":
print(solution(2_0))
| 259 |
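This is Project Euler 493: the expected number of distinct colours among 20 balls drawn from 70, computed by linearity of expectation. A readable sketch, assuming the two obfuscated constants mean `BALLS_PER_COLOUR = 10` and `NUM_COLOURS = 7`, as the surrounding arithmetic suggests:

```python
import math

BALLS_PER_COLOUR = 10  # assumed meaning of the first obfuscated constant
NUM_COLOURS = 7        # assumed meaning of the second
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def expected_distinct_colours(num_picked: int = 20) -> str:
    # P(a fixed colour is entirely missed) = C(50, 20) / C(60, 20) here;
    # by linearity of expectation, E[#colours] = NUM_COLOURS * (1 - that).
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    return f"{NUM_COLOURS * (1 - missing_colour / total):.9f}"


print(expected_distinct_colours(20))  # 6.818741802
```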
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = {}
__lowerCAmelCase = 2
while True:
__lowerCAmelCase = factor_map.pop(_UpperCamelCase , _UpperCamelCase )
if factor:
__lowerCAmelCase = factor + prime
while x in factor_map:
x += factor
__lowerCAmelCase = factor
else:
__lowerCAmelCase = prime
yield prime
prime += 1
def _lowerCamelCase ( _UpperCamelCase = 1e10 ):
'''simple docstring'''
__lowerCAmelCase = sieve()
__lowerCAmelCase = 1
while True:
__lowerCAmelCase = next(_UpperCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(_UpperCamelCase )
n += 2
if __name__ == "__main__":
print(solution())
| 259 | 1 |
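The generator above is an incremental (unbounded) Sieve of Eratosthenes: `factor_map` maps each pending composite to one of its prime factors, so any candidate missing from the map is prime. The solution loop then relies on the identity that, for odd n, ((p − 1)^n + (p + 1)^n) mod p² = 2np, while even n always leaves remainder 2, which is why every other prime is skipped. A sketch of the sieve with readable names (mine):

```python
from collections.abc import Generator


def incremental_sieve() -> Generator[int, None, None]:
    """Yield primes indefinitely; map misses are exactly the primes."""
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            # Composite: reschedule this factor at its next unclaimed multiple.
            x = candidate + factor
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # Prime: its first relevant composite is its square.
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1


gen = incremental_sieve()
print([next(gen) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```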
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = """▁"""
UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCAmelCase = {
"""facebook/xglm-564M""": 2_048,
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __UpperCamelCase : int , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : str="<unk>" , __UpperCamelCase : Union[str, Any]="<pad>" , __UpperCamelCase : Any = None , **__UpperCamelCase : Dict , ) -> None:
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCamelCase = 7
_UpperCamelCase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
_UpperCamelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_UpperCamelCase = len(self.sp_model )
_UpperCamelCase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ) -> List[str]:
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ) -> Tuple:
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Tuple = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCamelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any = None , __UpperCamelCase : List[Any] = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def _UpperCamelCase ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] = None ) -> List[int]:
_UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self : Optional[int] ) -> Any:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : int ) -> List[str]:
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Tuple ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self : int , __UpperCamelCase : str ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self : int , __UpperCamelCase : Tuple ) -> str:
_UpperCamelCase = "".join(__UpperCamelCase ).replace(__UpperCamelCase , ''' ''' ).strip()
return out_string
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 256 |
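The subtle part of this tokenizer is the +1 offset between SentencePiece ids and fairseq ids, with the first four tokens pinned. A minimal sketch of the token-to-id logic (`piece_to_id` is a stand-in for `self.sp_model.PieceToId`; the mapping mirrors the method above):

```python
FAIRSEQ_OFFSET = 1
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def token_to_id(token: str, piece_to_id) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces; map that to fairseq's <unk> (3).
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]


toy_spm = {"▁Hello": 35377}  # toy piece table; 0 means "unknown"
lookup = lambda piece: toy_spm.get(piece, 0)
assert token_to_id("<pad>", lookup) == 1
assert token_to_id("▁Hello", lookup) == 35378  # raw spm id + offset
assert token_to_id("mystery", lookup) == 3     # unknown falls back to <unk>
```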
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple:
# Load configuration defined in the metadata file
with open(lowercase ) as metadata_file:
__snake_case : int = json.load(lowercase )
__snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
__snake_case : Tuple = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
__snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
__snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
__snake_case : Tuple = json.load(lowercase )
__snake_case : List[Any] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
__snake_case : Any = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
__snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0]
__snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]
__snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"]
__snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : List[Any] = state_dict[bias_name]
__snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
__snake_case : str = state_dict[prefix + matrix_name]
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__snake_case : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__snake_case : str = state_dict[key]
else:
__snake_case : str = state_dict[key]
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
__snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__snake_case : Union[str, Any] = (0, 9)
__snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Any = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : Optional[Any] = torch.Size((1, 33, 768) )
__snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : str = torch.Size((1, 1, 768) )
__snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case : str = MLukeTokenizer.from_pretrained(lowercase )
__snake_case : Dict = "Tokyo is the capital of <mask>."
__snake_case : Union[str, Any] = (24, 30)
__snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : int = model(**lowercase )
__snake_case : Dict = encoding["input_ids"][0].tolist()
__snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
__snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]:
__snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"]
__snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )]
__snake_case : Any = {}
for entry in data:
__snake_case : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case : Optional[int] = entity_id
break
__snake_case : Union[str, Any] = f"""{language}:{entity_name}"""
__snake_case : Any = entity_id
return new_mapping
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 0 |
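A recurring move in this conversion script is growing a weight matrix by one row per new special token, initializing each new row as a copy of an existing row (e.g. `<ent>`/`<ent2>` from '@'/'#', `[MASK2]` from `[MASK]`). A small self-contained sketch of that pattern (names mine):

```python
import torch


def append_rows(weight: torch.Tensor, source_indices: list) -> torch.Tensor:
    """Extend `weight` by one row per source index, each row copied
    from the existing row at that index."""
    new_rows = [weight[i].unsqueeze(0) for i in source_indices]
    return torch.cat([weight, *new_rows], dim=0)


word_emb = torch.randn(10, 4)              # toy vocab of 10, hidden size 4
extended = append_rows(word_emb, [7, 3])   # two new tokens seeded from rows 7 and 3
assert extended.shape == (12, 4)
assert torch.equal(extended[10], word_emb[7])
```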
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ = {
"""squeezebert/squeezebert-uncased""": 5_1_2,
"""squeezebert/squeezebert-mnli""": 5_1_2,
"""squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class __lowerCamelCase ( __a ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = SqueezeBertTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase="[UNK]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="[PAD]" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase__ ) != tokenize_chinese_chars
):
lowercase_ = getattr(UpperCamelCase__ , normalizer_state.pop("type" ) )
lowercase_ = do_lower_case
lowercase_ = strip_accents
lowercase_ = tokenize_chinese_chars
lowercase_ = normalizer_class(**UpperCamelCase__ )
lowercase_ = do_lower_case
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 369 |
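The two helpers at the end implement the standard BERT-style packing: `[CLS] A [SEP] B [SEP]`, with token type 0 for the first segment and 1 for the second. A plain-list sketch of that logic (the ids are illustrative, not the real SqueezeBERT vocab):

```python
CLS, SEP = 101, 102  # illustrative special-token ids


def with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out


def token_type_ids(ids_a, ids_b=None):
    # Segment A (with its CLS/SEP) -> 0s; segment B (with its SEP) -> 1s.
    first = len([CLS] + ids_a + [SEP]) * [0]
    if ids_b is None:
        return first
    return first + len(ids_b + [SEP]) * [1]


assert with_special_tokens([1, 2], [3]) == [101, 1, 2, 102, 3, 102]
assert token_type_ids([1, 2], [3]) == [0, 0, 0, 0, 1, 1]
```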
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
| 297 | 0 |
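The shape assertions in `create_and_check_model` encode FocalNet's hierarchical downsampling: each of the `len(depths) - 1` stage transitions halves both spatial dimensions (dividing the patch count by 4) and doubles the channel dimension. A worked example with the tester defaults above:

```python
# Tester defaults: image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

num_patches = (image_size // patch_size) ** 2        # 16 * 16 = 256 patches
downsample = 4 ** (len(depths) - 1)                  # two stage transitions: H/2 * W/2 each
expected_seq_len = num_patches // downsample         # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)    # 16 * 4 = 64 channels

assert (expected_seq_len, expected_dim) == (16, 64)
```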
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Any=13 ,A_ : List[Any]=30 ,A_ : List[str]=2 ,A_ : List[Any]=3 ,A_ : int=True ,A_ : str=True ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : List[str]=4 ,A_ : Union[str, Any]=37 ,A_ : int="gelu" ,A_ : Optional[int]=0.1 ,A_ : Optional[int]=0.1 ,A_ : Tuple=10 ,A_ : List[str]=0.02 ,A_ : Union[str, Any]=3 ,A_ : Dict=None ,A_ : Optional[Any]=2 ,) -> Optional[Any]:
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
A = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A = (image_size // patch_size) ** 2
A = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : int ,A_ : Any ) -> str:
A = DeiTModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : Optional[int] ,A_ : str ) -> Union[str, Any]:
A = DeiTForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = DeiTForMaskedImageModeling(A_ )
model.to(A_ )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(A_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any ,A_ : Tuple ,A_ : str ) -> Any:
A = self.type_sequence_label_size
A = DeiTForImageClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = DeiTForImageClassification(A_ )
model.to(A_ )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Optional[int] = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCamelCase: Optional[Any] = False
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = DeiTModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ ,nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : int ,A_ : str ,A_ : Tuple=False ) -> Optional[int]:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A = model_class(A_ )
model.to(A_ )
model.train()
A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ )
A = model(**A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A = False
A = True
for model_class in self.all_model_classes:
if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ )
A = model(**A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A_ ),
*get_values(A_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
A = problem_type['title']
A = problem_type['num_labels']
A = model_class(A_ )
model.to(A_ )
model.train()
A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if problem_type["num_labels"] > 1:
A = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
A = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A_ ) as warning_list:
A = model(**A_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = DeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
A = model(**A_ )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
A = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' ,torch_dtype=torch.floataa ,device_map='auto' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='pt' )
A = inputs.pixel_values.to(A_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A = model(A_ )
| 74 |
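The tester's sequence-length comment is worth making concrete: DeiT prepends both a [CLS] token and a distillation token, so the sequence is two longer than the patch grid. With the defaults above (image_size=30, patch_size=2):

```python
image_size, patch_size = 30, 2                    # tester defaults above
num_patches = (image_size // patch_size) ** 2     # 15 * 15 = 225
seq_length = num_patches + 2                      # + [CLS] + distillation token
assert seq_length == 227
```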
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 | 1 |
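This `__init__` defers the heavy framework imports until an attribute is actually touched. The real `_LazyModule` lives in `transformers.utils`; below is a generic sketch of the underlying pattern, a simplification that ignores `module_spec` handling and pickling support. Accessing `LazyModule("pkg", {"modeling": ["Model"]}).Model` would import `pkg.modeling` only at that point.

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            "." + self._attr_to_module[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache: later lookups skip __getattr__
        return value
```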
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : List[Any] , UpperCamelCase : Dict=-1 ) -> List[Any]:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
lowerCAmelCase__ : Optional[int] = label_idx
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = mode.value
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
else:
lowerCAmelCase__ : Optional[int] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def _lowerCAmelCase ( self : Any , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ : Union[str, Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowerCAmelCase ( self : str , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : List[str] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : str = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Optional[Any] = mode.value
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[Any] = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCamelCase ) == len(UpperCamelCase )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
return examples
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = 0
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = preds_list[example_id]
lowerCAmelCase__ : List[Any] = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCamelCase )
example_id += 1
def _lowerCAmelCase ( self : Dict , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 212 |
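The three task classes above all reduce to the same core loop: group token-per-line CoNLL blocks into sentences, flushing on blank lines and `-DOCSTART-` markers, with `label_idx` selecting the label column (-1 for NER tags, -2 for chunking tags). A minimal sketch of that loop (names mine):

```python
def read_conll(lines, label_idx=-1):
    """Group CoNLL-style token lines into (words, labels) sentences."""
    examples, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                examples.append((words, labels))
                words, labels = [], []
        else:
            splits = line.rstrip("\n").split(" ")
            words.append(splits[0])
            # Test files may carry no label column; default to "O".
            labels.append(splits[label_idx] if len(splits) > 1 else "O")
    if words:
        examples.append((words, labels))
    return examples


sample = ["EU NNP B-NP B-ORG\n", "rejects VBZ B-VP O\n", "\n", "Peter NNP B-NP B-PER\n"]
print(read_conll(sample))
# [(['EU', 'rejects'], ['B-ORG', 'O']), (['Peter'], ['B-PER'])]
```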
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 212 | 1 |
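For reference, the same one-liner with readable names and a sample of its output:

```python
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))


print(multiplication_table(number=5, number_of_terms=3))
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15
```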