| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
"""Sieve of Eratosthenes: return every prime up to and including ``num``."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Anything left unmarked above sqrt(num) is prime as well
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
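# Quick sanity check (illustrative addition, not part of the original sample):
# >>> prime_sieve(10)
# [2, 3, 5, 7]
# >>> prime_sieve(2)
# [2]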
| 58
|
"""simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
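# Illustrative usage sketch (the subclass and defaults below are made up for demonstration):
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
# RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})  # -> "run_lr0.0001"
# RunNamer.parse_repr("run_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 32}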
| 261
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __A ( a_ :str) -> Optional[int]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __A ( ) -> Tuple:
with parallel_backend('''spark'''):
assert ParallelBackendConfig.backend_name == "spark"
__a : int = [1, 2, 3]
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=2)
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def __A ( a_ :Tuple) -> Union[str, Any]:
__a : List[Any] = [1, 2]
__a : Optional[int] = {'''a''': 1, '''b''': 2}
__a : Dict = {'''a''': [1, 2], '''b''': [3, 4]}
__a : List[str] = {'''a''': {'''1''': 1}, '''b''': 2}
__a : Union[str, Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__a : str = [2, 3]
__a : str = {'''a''': 2, '''b''': 3}
__a : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
__a : Optional[Any] = {'''a''': {'''1''': 2}, '''b''': 3}
__a : List[Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark'''):
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
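# For orientation: map_nested applies a function to every leaf of a nested
# list/dict structure, e.g. (illustrative):
# map_nested(add_one, {"a": [1, 2], "b": 3})  ->  {"a": [2, 3], "b": 4}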
| 188
|
"""simple docstring"""
def __A ( a_ :float) -> float:
if edge <= 0 or not isinstance(a_ , a_):
raise ValueError('''Length must be a positive.''')
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __A ( a_ :float) -> float:
if edge <= 0 or not isinstance(a_ , a_):
raise ValueError('''Length must be a positive.''')
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
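# Worked example (values follow from the closed-form expressions above):
# dodecahedron_surface_area(5) = 3 * sqrt(25 + 10*sqrt(5)) * 5**2  ≈ 516.14
# dodecahedron_volume(5)       = (15 + 7*sqrt(5)) / 4 * 5**3       ≈ 957.89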
| 188
| 1
|
"""ALBERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
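# Illustrative usage sketch (a minimal example, not part of the original sample):
# config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# onnx_config = AlbertOnnxConfig.from_model_config(config)
# onnx_config.inputs  ->  OrderedDict of dynamic ONNX axes per input tensor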
| 97
|
"""Change the brightness of a PIL image by an additive level."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
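# Note: 128 + level + (c - 128) simplifies to c + level, i.e. a uniform additive
# shift of every channel value; PIL clips the result to the 0..255 range.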
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 97
| 1
|
"""Prim's minimum spanning tree algorithm with a map-backed min priority queue."""
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node at ``position``."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue with O(log n) push, extract-min and update-key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Return the distance and parent maps produced by Prim's algorithm."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
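# Illustrative run (not part of the original sample):
# graph = GraphUndirectedWeighted[int]()
# graph.add_edge(1, 2, 1); graph.add_edge(2, 3, 2)
# graph.add_edge(3, 4, 1); graph.add_edge(1, 4, 5)
# dist, parent = prims_algo(graph)
# parent  ->  {1: None, 2: 1, 3: 2, 4: 3} under the update rule above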
| 369
|
"""DDIM image-to-image pipeline that also reports the noising timestep."""
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
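# Illustrative usage sketch (the checkpoint id and subfolder layout below are
# assumptions, not from this sample):
# unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
# scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
# images, noising_timestep = pipe(image=pil_image, strength=0.6, return_dict=False)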
| 275
| 0
|
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : Dict = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
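# Example invocation (the script filename below is an assumption):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32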
| 13
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) for a (possibly curved) FLRW universe."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
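# The expression above is H(z) = H0 * sqrt(Ω_r(1+z)^4 + Ω_m(1+z)^3 + Ω_k(1+z)^2 + Ω_Λ)
# with Ω_k = 1 - (Ω_r + Ω_m + Ω_Λ); when the densities sum to one and z = 0,
# the function simply returns hubble_constant (68.3 in the demo below).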
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase : List[Any] = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 13
| 1
|
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 354
|
"""Tests for the CANINE tokenizer."""
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token_obj = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token_obj]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2, tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    # ("b" and "B" for example have different Unicode code points)
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 160
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '''[CLS]''',
SEP: '''[SEP]''',
BOS: '''[BOS]''',
MASK: '''[MASK]''',
PAD: '''[PAD]''',
RESERVED: '''[RESERVED]''',
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). Converts text to a sequence of characters, then each
    character to its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (i.e. its codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). In case it's a special codepoint, convert to
        human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
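

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original module).
    # CANINE tokenizes at the character level: every id is simply a Unicode codepoint,
    # with [CLS]/[SEP] drawn from the private-use constants defined above, so the
    # tokenizer needs no vocabulary file at all.
    tokenizer = CanineTokenizer()
    ids = tokenizer("hello")["input_ids"]
    assert ids == [CLS] + [ord(c) for c in "hello"] + [SEP]
    print(ids)  # [57344, 104, 101, 108, 108, 111, 57345]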
| 37
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like "180" or "身高" or "神"
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
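

# Added illustration (not in the original script): `add_sub_symbol` re-marks the
# non-initial characters of every LTP-segmented word with "##" so whole-word
# masking can treat the word as one unit, e.g.
#
#     add_sub_symbol(["中", "国", "人"], {"中国"})  ->  ["中", "##国", "人"]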
| 179
| 0
|
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the coefficients a, b and c, calculates the roots of any quadratic
    equation of the form ax^2 + bx + c = 0.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
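

# Added illustration: with a non-negative discriminant the imaginary part is zero
# and plain floats come back; otherwise the full complex pair is returned.
assert quadratic_roots(a=1, b=-2, c=1) == (1.0, 1.0)  # repeated real root
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)  # complex conjugate pair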
| 275
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets (or lists/tuples).
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
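    # Added illustration: |A ∩ B| = 3 ({'c', 'd', 'e'}) and |A ∪ B| = 8, so the
    # similarity printed above is 0.375; the "alternative union" variant divides
    # by |A| + |B| = 11 instead.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ≈ 0.2727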
| 275
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """
    Helper function for reproducible behavior: sets the seed in `random`, `numpy` and `torch`.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        r"""Move the internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        r"""Returns the state of the ExponentialMovingAverage as a dict, following `torch.nn.Module.state_dict`."""
        # Following PyTorch conventions, references to tensors are returned, not their copies.
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        r"""Temporarily store the current parameters so they can be restored later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        r"""Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        r"""Load the ExponentialMovingAverage state, following `torch.nn.Module.load_state_dict`."""
        # deepcopy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
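

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original module).
    # The shadow parameters track the live model: call `step()` after every optimizer
    # update and `copy_to()` to export the averaged weights for evaluation.
    model = torch.nn.Linear(4, 2)
    ema = EMAModel(model.parameters(), decay=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(10):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.step(model.parameters())  # update the shadow parameters
    ema.copy_to(model.parameters())  # load the averaged weights into the model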
| 248
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
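

# Added usage note: this `main` backs the `accelerate` console script, so the
# sub-commands registered above are invoked from a shell as, e.g.:
#
#     accelerate config             # interactive configuration wizard
#     accelerate env                # print environment info for bug reports
#     accelerate launch train.py    # run a script with the configured setup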
| 248
| 1
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
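

# Added illustration (independent of the p059_cipher.txt data file): XOR with a
# repeating key is its own inverse, so `try_key` recovers the plaintext exactly
# when given the key that produced the ciphertext.
_sample = [ord(c) ^ k for c, k in zip("the quick fox", cycle(b"god"))]
assert try_key(_sample, (ord("g"), ord("o"), ord("d"))) == "the quick fox"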
if __name__ == "__main__":
print(f"{solution() = }")
| 316
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 1
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to order the graph's vertices by finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search on the reversed graph to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of `graph` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
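

if __name__ == "__main__":
    # Added illustration: Kosaraju's two-pass algorithm on the sample graphs above.
    # `test_graph_1` contains the cycle 0 -> 2 -> 1 -> 0 plus the acyclic tail 3 -> 4.
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]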
| 5
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    r"""
    Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets into a single processor.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 39
| 0
|
def perfect_cube(n: int) -> bool:
    """
    Check if a number is a perfect cube or not.
    """
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
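    # Added note: the float cube root makes this check unreliable in general;
    # 64 ** (1 / 3) evaluates to 3.9999999999999996 on IEEE-754 doubles.
    print(perfect_cube(64))  # False, even though 4 ** 3 == 64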
| 131
|
def solution(limit: int = 1000000) -> int:
    """
    Count the values of n below `limit` for which x^2 - y^2 - z^2 = n has exactly
    ten solutions with x, y, z positive integers in arithmetic progression
    (Project Euler problem 135).
    """
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
| 131
| 1
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of two linear equations given as 3-element rows [a, b, d]
    (meaning ax + by = d) using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
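

# Added illustration: x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)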
| 64
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer, based on SentencePiece (https://github.com/google/sentencepiece).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create a mask of zeros: T5 does not make use of token type ids."""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by appending eos: `X </s>` or `A </s> B </s>`."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
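

# Illustrative usage sketch (added; requires a SentencePiece model, e.g. the
# public "t5-small" checkpoint):
#
#     from transformers import T5Tokenizer
#     tok = T5Tokenizer.from_pretrained("t5-small")
#     tok.tokenize("Translate: <extra_id_0>")      # the sentinel stays one token
#     tok.convert_tokens_to_ids(["<extra_id_0>"])  # [32099]: sentinels sit at the top of the vocab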
| 347
| 0
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 353
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a CTRL model.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
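

if __name__ == "__main__":
    # Added illustration: `attribute_map` above aliases the generic config names to
    # CTRL's historical ones, so `hidden_size` transparently reads/writes `n_embd`.
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48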
| 2
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
            text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)

            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
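# A small, self-contained sketch of the length-warning behaviour exercised by
# the last test above (hedged: it reuses the same tiny public checkpoint as the
# tests and is meant for a manual run, not for the suite):
if __name__ == "__main__":
    from transformers import pipeline as demo_pipeline

    demo_generator = demo_pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    # Passing both limits triggers the "Both `max_new_tokens`..." warning;
    # passing only one of them stays silent.
    print(demo_generator("Hello world", max_length=10, max_new_tokens=1))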
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Return a copy of ``img`` with its contrast changed by ``level`` (-255..255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
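        # Sanity check of the mapping (a minimal sketch, not part of the original
        # script): level 0 gives factor 1.0, so pixel values must be unchanged.
        assert change_contrast(img, 0).getpixel((0, 0)) == img.getpixel((0, 0))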
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
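# The mask derivation above in miniature (a hedged, illustrative sketch with
# made-up token ids and pad_token_id=0; guarded so it only runs manually and
# only when TensorFlow is installed):
if is_tf_available() and __name__ == "__main__":
    demo_ids = tf.constant([[5, 6, 0, 0]])
    demo_mask = tf.cast(tf.math.not_equal(demo_ids, 0), tf.int8)
    print(demo_mask.numpy())  # [[1 1 0 0]] -> 1 over real tokens, 0 over padding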
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
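# A minimal end-to-end sketch of the slow test above, runnable by hand
# (hedged: it downloads the real google/pegasus-xsum weights, so it sits
# behind a manual __main__ guard and is not part of the test suite):
if is_tf_available() and __name__ == "__main__":
    demo_tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    demo_model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    demo_inputs = demo_tokenizer(["PG&E scheduled the blackouts to reduce wildfire risk."], padding=True, return_tensors="tf")
    demo_ids = demo_model.generate(demo_inputs.input_ids, attention_mask=demo_inputs.attention_mask, num_beams=2)
    print(demo_tokenizer.batch_decode(demo_ids.numpy(), skip_special_tokens=True))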
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
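# The default-dropping loop above in miniature (a standalone sketch with a
# hypothetical helper name, not part of the builder): kwargs still holding
# their default value are stripped before being forwarded to pandas.read_csv.
def _drop_default_kwargs_demo(kwargs: dict, defaults: dict) -> dict:
    return {key: value for key, value in kwargs.items() if key not in defaults or defaults[key] != value}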
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
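# Typical entry point for the builder above (a hedged sketch; "my_data.csv" is
# a placeholder path): datasets.load_dataset dispatches to Csv/CsvConfig.
if __name__ == "__main__":
    demo_dataset = datasets.load_dataset("csv", data_files="my_data.csv")
    print(demo_dataset)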
import math

import qiskit


def quantum_full_adder(input_a: int = 1, input_b: int = 1, carry_in: int = 1):
    """Build a quantum full adder and return the simulated measurement counts."""
    if (
        not isinstance(input_a, int)
        or not isinstance(input_b, int)
        or not isinstance(carry_in, int)
    ):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
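    # Classical cross-check (a minimal sketch, not part of the original script):
    # sum = a XOR b XOR c_in and carry = majority(a, b, c_in), so inputs (1, 1, 1)
    # should collapse to carry=1, sum=1, i.e. counts dominated by the '11' outcome.
    total = 1 + 1 + 1
    print(f"Classical expectation: carry={total // 2}, sum={total % 2}")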
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
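# The hand-written placeholders above all repeat one "dummy object" pattern.
# A minimal sketch of generating such a class instead (hypothetical helper,
# not part of this module; unlike the metaclass-based originals it only guards
# instantiation, not classmethod access):
def _make_dummy_class(name, backends=("torch",)):
    def __init__(self, *args, **kwargs):
        requires_backends(self, list(backends))

    return type(name, (), {"_backends": list(backends), "__init__": __init__})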
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list forms an arithmetic series.

    >>> is_arithmetic_series([2, 4, 6])
    True
    >>> is_arithmetic_series([2, 4, 7])
    False
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list.

    >>> arithmetic_mean([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
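    # Cross-check (a minimal sketch, not part of the original module): for an
    # arithmetic series the mean also equals (first + last) / 2.
    sample = [2, 4, 6, 8]
    assert is_arithmetic_series(sample)
    assert arithmetic_mean(sample) == (sample[0] + sample[-1]) / 2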
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case : Tuple = logging.get_logger(__name__)
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : str = UNetaDModel
UpperCAmelCase__ : str = '''sample'''
@property
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Tuple:
return (3, 32, 32)
@property
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
return (3, 32, 32)
def lowerCamelCase__( self :str ) -> Tuple:
a__ = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
a__ = self.dummy_input
return init_dict, inputs_dict
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = UNetaDModel
UpperCAmelCase__ : Any = '''sample'''
@property
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = 4
a__ = 4
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Any ) -> str:
return (4, 32, 32)
@property
def lowerCamelCase__( self :Any ) -> Dict:
return (4, 32, 32)
def lowerCamelCase__( self :int ) -> int:
a__ = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
a__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__( self :str ) -> Any:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model_accelerate.to(__snake_case )
model_accelerate.eval()
a__ = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
a__ = model_accelerate(__snake_case ,__snake_case )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a__ , a__ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case ,low_cpu_mem_usage=__snake_case )
model_normal_load.to(__snake_case )
model_normal_load.eval()
a__ = model_normal_load(__snake_case ,__snake_case )['sample']
assert torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 )
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__snake_case )
a__ = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 ) )
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Dict = UNetaDModel
UpperCAmelCase__ : Optional[Any] = '''sample'''
@property
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any]=(32, 32) ) -> Optional[int]:
a__ = 4
a__ = 3
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
return (3, 32, 32)
@property
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
return (3, 32, 32)
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
a__ = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__( self :str ) -> Tuple:
a__ , a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = self.dummy_input
a__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(__snake_case )
a__ = noise
a__ = model(**__snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (2_56, 2_56)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :Dict ) -> int:
a__ = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :int ) -> str:
# not required for this model
pass
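def _usage_sketch_unet2d():
    # Illustrative sketch, not part of the test suite above: load the same
    # dummy checkpoint the tests use and run a single denoising step. This
    # assumes the public diffusers API, where the model class is exposed as
    # UNet2DModel; the helper name here is hypothetical.
    import torch
    from diffusers import UNet2DModel

    model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
    model.eval()
    noise = torch.randn(
        1, model.config.in_channels, model.config.sample_size, model.config.sample_size
    )
    timestep = torch.tensor([10])
    with torch.no_grad():
        # returns the predicted noise residual for one timestep
        return model(noise, timestep).sample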
| 109
| 1
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Union[str, Any] =1
lowerCamelCase_ : Dict =3
lowerCamelCase_ : Tuple =(32, 32)
lowerCamelCase_ : int =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def UpperCAmelCase__ ( self : Tuple ):
torch.manual_seed(0 )
lowerCamelCase_ : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : str ):
torch.manual_seed(0 )
lowerCamelCase_ : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
lowerCamelCase_ : str =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case__ )
@property
def UpperCAmelCase__ ( self : List[str] ):
def extract(*snake_case__ : Any , **snake_case__ : Dict ):
class lowercase__ :
def __init__( self : Dict ):
lowerCamelCase_ : int =torch.ones([0] )
def UpperCAmelCase__ ( self : str , snake_case__ : Tuple ):
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Any ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Any =self.dummy_cond_unet
lowerCamelCase_ : Union[str, Any] =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
lowerCamelCase_ : Optional[int] =self.dummy_vae
lowerCamelCase_ : int =self.dummy_text_encoder
lowerCamelCase_ : Union[str, Any] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # assemble the pipeline around the DDIM scheduler defined above
lowerCamelCase_ : List[Any] =StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : int =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[int] ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Any =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : Any =sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCamelCase_ : List[Any] =output.images
lowerCamelCase_ : Any =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : int =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case__ , )[0]
lowerCamelCase_ : Dict =image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : Optional[Any] =np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Tuple =self.dummy_cond_unet
lowerCamelCase_ : Any =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : Optional[Any] =self.dummy_vae
lowerCamelCase_ : List[str] =self.dummy_text_encoder
lowerCamelCase_ : Any =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Optional[int] =StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : str =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Union[str, Any] =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : List[Any] =sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCamelCase_ : str =output.images
lowerCamelCase_ : Optional[Any] =torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCamelCase_ : Dict =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case__ , )[0]
lowerCamelCase_ : int =image[0, -3:, -3:, -1]
lowerCamelCase_ : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : List[str] =np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Union[str, Any] =StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
assert isinstance(pipe.scheduler , snake_case__ )
assert pipe.safety_checker is None
lowerCamelCase_ : Any =pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowerCamelCase_ : List[Any] =StableDiffusionPipeline.from_pretrained(snake_case__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase_ : Tuple =pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Optional[int] =self.dummy_cond_unet
lowerCamelCase_ : str =PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.dummy_vae
lowerCamelCase_ : Union[str, Any] =self.dummy_text_encoder
lowerCamelCase_ : Union[str, Any] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCamelCase_ : Optional[int] =unet.half()
lowerCamelCase_ : Any =vae.half()
lowerCamelCase_ : Any =bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : List[str] =StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : str ="A painting of a squirrel eating a burger"
lowerCamelCase_ : Dict =sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : str =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=snake_case__ )
lowerCamelCase_ : List[str] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase_ : Dict =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[int] =(
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCamelCase_ : Dict =40_0366_0346
lowerCamelCase_ : List[Any] =7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase_ : Any =torch.manual_seed(snake_case__ )
lowerCamelCase_ : List[Any] =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ : Dict =output.images
lowerCamelCase_ : List[Any] =image[0, -3:, -3:, -1]
lowerCamelCase_ : str =[0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with strong safety guidance (sld_guidance_scale = 2000)
lowerCamelCase_ : Any =torch.manual_seed(snake_case__ )
lowerCamelCase_ : List[str] =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ : List[Any] =output.images
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
lowerCamelCase_ : List[Any] =[0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Dict =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=snake_case__ )
lowerCamelCase_ : Optional[int] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase_ : List[str] =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Dict ="padme amidala taking a bath artwork, safe for work, no nudity"
lowerCamelCase_ : List[Any] =27_3497_1755
lowerCamelCase_ : str =7
lowerCamelCase_ : Dict =torch.manual_seed(snake_case__ )
lowerCamelCase_ : Optional[int] =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ : List[Any] =output.images
lowerCamelCase_ : Any =image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[Any] =[0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowerCamelCase_ : List[Any] =torch.manual_seed(snake_case__ )
lowerCamelCase_ : Optional[int] =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ : int =output.images
lowerCamelCase_ : str =image[0, -3:, -3:, -1]
lowerCamelCase_ : Dict =[0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : List[Any] =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCamelCase_ : Union[str, Any] =sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Union[str, Any] =(
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCamelCase_ : Union[str, Any] =10_4435_5234
lowerCamelCase_ : Tuple =12
lowerCamelCase_ : List[str] =torch.manual_seed(snake_case__ )
lowerCamelCase_ : Optional[int] =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCamelCase_ : Optional[int] =output.images
lowerCamelCase_ : Dict =image[0, -3:, -3:, -1]
lowerCamelCase_ : List[str] =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowerCamelCase_ : Optional[Any] =torch.manual_seed(snake_case__ )
lowerCamelCase_ : Dict =sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase_ : Union[str, Any] =output.images
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
lowerCamelCase_ : List[Any] =np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
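def _usage_sketch_safe_pipeline():
    # Illustrative sketch, not part of the test suite above: run the safe
    # pipeline with safe latent diffusion guidance enabled, mirroring the
    # nightly tests. The import path is the one used at the top of this file;
    # the prompt and helper name are hypothetical.
    import torch
    from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

    pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
    generator = torch.manual_seed(0)
    return pipe(
        ["a photograph of an astronaut riding a horse"],
        generator=generator,
        num_inference_steps=50,
        output_type="np",
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]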
| 144
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def _snake_case ( inductance : float , capacitance : float ) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
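    # Illustrative check (the component values are an example, not from the
    # original file): a 10 mH inductor with a 100 nF capacitor resonates near
    # 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)).
    print(_snake_case(inductance=0.01 , capacitance=1E-7 ))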
| 144
| 1
|
def solution ( n :int = 600_851_475_143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
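    # Illustrative check (the value is an example, not from the original
    # file): the largest prime factor of 13195 = 5 * 7 * 13 * 29 is 29.
    print(f"""{solution(13_195) = }""")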
| 26
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _a ( a :str ) -> Any:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
a = model_type_to_module_name(a )
a = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a , a )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(a , '''__name__''' , a ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
a = importlib.import_module('''transformers''' )
if hasattr(a , a ):
return getattr(a , a )
return None
def _a ( a :Union[str, os.PathLike] , a :Optional[Union[str, os.PathLike]] = None , a :bool = False , a :bool = False , a :Optional[Dict[str, str]] = None , a :Optional[Union[bool, str]] = None , a :Optional[str] = None , a :bool = False , **a :int , ) -> Tuple:
a = get_file_from_repo(
a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(a , encoding='''utf-8''' ) as reader:
return json.load(a )
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple ) ->int:
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__UpperCAmelCase )
def __lowerCAmelCase ( cls : int , __UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Dict ) ->List[Any]:
"""simple docstring"""
a = kwargs.pop('''config''' , __UpperCAmelCase )
a = kwargs.pop('''trust_remote_code''' , __UpperCAmelCase )
a = True
a , a = FeatureExtractionMixin.get_feature_extractor_dict(__UpperCAmelCase , **__UpperCAmelCase )
a = config_dict.get('''feature_extractor_type''' , __UpperCAmelCase )
a = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
a = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                # It could be in `config.feature_extractor_type`
a = getattr(__UpperCAmelCase , '''feature_extractor_type''' , __UpperCAmelCase )
if hasattr(__UpperCAmelCase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
a = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
a = feature_extractor_class_from_name(__UpperCAmelCase )
a = feature_extractor_auto_map is not None
a = feature_extractor_class is not None or type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
a = resolve_trust_remote_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if has_remote_code and trust_remote_code:
a = get_class_from_dynamic_module(
__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
a = kwargs.pop('''code_revision''' , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
a = FEATURE_EXTRACTOR_MAPPING[type(__UpperCAmelCase )]
return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) ->Optional[int]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
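def _usage_sketch_auto_feature_extractor():
    # Illustrative sketch, not part of the module above: resolve a feature
    # extractor from a checkpoint name via the mapping defined here. Assumes
    # the public transformers API (AutoFeatureExtractor); the checkpoint and
    # helper names are examples.
    from transformers import AutoFeatureExtractor

    # For a wav2vec2 checkpoint this resolves to Wav2Vec2FeatureExtractor
    # per FEATURE_EXTRACTOR_MAPPING_NAMES above.
    return AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")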
| 26
| 1
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
A : List[Any] = logging.getLogger(__name__)
A : Tuple = 'pytorch_model.bin'
@dataclasses.dataclass
class A :
'''simple docstring'''
A__ = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class A :
'''simple docstring'''
A__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
A__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''The name of the task to train on.'''} , )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class A :
'''simple docstring'''
A__ = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
A__ = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
A__ = dataclasses.field(
default='''no''' , metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
A__ = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
A__ = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
A__ = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
A__ = dataclasses.field(
        default=1_00 , metadata={'''help''': '''Maximum number of self-training iterations.'''} , )
A__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'''help''': '''Random seed for initialization.'''} , )
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        lowercase__ = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowercase__ = int(eval_result * len(__magic_name__ ) )
print(__magic_name__ )
lowercase__ = dataset.sort("""probability""" , reverse=__magic_name__ )
lowercase__ = dataset.select(range(__magic_name__ ) )
lowercase__ = dataset.remove_columns(["""label""", """probability"""] )
lowercase__ = dataset.rename_column("""prediction""" , """label""" )
    lowercase__ = dataset.map(lambda example : {"label": idalabel[example["label"]]} )
lowercase__ = dataset.shuffle(seed=args.seed )
lowercase__ = os.path.join(__magic_name__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(__magic_name__ , index=__magic_name__ )
else:
dataset.to_json(__magic_name__ )
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Any , **__magic_name__ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowercase__ = STModelArguments(model_name_or_path=__magic_name__ )
lowercase__ = STDataArguments(train_file=__magic_name__ , infer_file=__magic_name__ )
lowercase__ = STTrainingArguments(output_dir=__magic_name__ )
lowercase__ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__magic_name__ ).items():
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
for key, value in kwargs.items():
if hasattr(__magic_name__ , __magic_name__ ):
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
# Sanity checks
lowercase__ = {}
lowercase__ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowercase__ = args.train_file
lowercase__ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowercase__ = args.eval_file
for key in data_files:
lowercase__ = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
lowercase__ = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
lowercase__ = f'''{args.output_dir}/self-train_iter-{{}}'''.format
lowercase__ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
accelerator.wait_for_everyone()
lowercase__ = None
lowercase__ = None
lowercase__ = 0
lowercase__ = False
# Show the progress bar
lowercase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowercase__ = data_dir_format(__magic_name__ )
assert os.path.exists(__magic_name__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowercase__ = os.path.join(__magic_name__ , """stage-1""" )
lowercase__ = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__magic_name__ , __magic_name__ ):
arguments_dict.update({key: value} )
lowercase__ = os.path.join(__magic_name__ , """best-checkpoint""" , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __magic_name__ , __magic_name__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __magic_name__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowercase__ = os.path.join(__magic_name__ , """best-checkpoint""" )
lowercase__ = os.path.join(__magic_name__ , """stage-2""" )
# Update arguments_dict
lowercase__ = model_path
lowercase__ = data_files["""train"""]
lowercase__ = current_output_dir
lowercase__ = os.path.join(__magic_name__ , """best-checkpoint""" , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , __magic_name__ , __magic_name__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __magic_name__ )
lowercase__ = iteration
lowercase__ = data_dir_format(iteration + 1 )
lowercase__ = AutoConfig.from_pretrained(os.path.join(__magic_name__ , """best-checkpoint""" ) )
lowercase__ = config.idalabel
lowercase__ = os.path.join(__magic_name__ , """eval_results_best-checkpoint.json""" )
lowercase__ = os.path.join(__magic_name__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(__magic_name__ )
with open(__magic_name__ , """r""" ) as f:
lowercase__ = float(json.load(__magic_name__ )[args.eval_metric] )
lowercase__ = os.path.join(__magic_name__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(__magic_name__ )
# Loading the dataset from local csv or json files.
lowercase__ = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
lowercase__ = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(__magic_name__ ):
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
accelerator.wait_for_everyone()
lowercase__ = os.path.join(__magic_name__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowercase__ = eval_result
if best_iteration is None:
lowercase__ = new_iteration
lowercase__ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowercase__ = new_iteration
lowercase__ = new_eval_result
lowercase__ = 0
else:
if new_eval_result == best_eval_result:
lowercase__ = new_iteration
lowercase__ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowercase__ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , __magic_name__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __magic_name__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(__magic_name__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __magic_name__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__magic_name__ , """eval_results_best-iteration.json""" ) , )
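# Illustrative invocation (the entry-point name follows the original script's
# `selftrain` function, and the argument names follow the dataclass fields
# above; the concrete values are assumptions):
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="infer.csv",
#       output_dir="self-training-output",
#       max_selftrain_iterations=3,
#   )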
| 305
|
def UpperCamelCase ( column_title : str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
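    # Illustrative checks (examples, not from the original file): the base-26
    # scheme above maps "A" -> 1, "AB" -> 28 and "ZZ" -> 702.
    print(UpperCamelCase("AB"))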
| 305
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( __a , __a=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =None
if token is not None:
lowerCamelCase__: List[Any] ={"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
lowerCamelCase__: Tuple =F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCamelCase__: List[str] =requests.get(__a , headers=__a ).json()
lowerCamelCase__: Dict ={}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
lowerCamelCase__: Any =math.ceil((result["total_count"] - 100) / 100 )
for i in range(__a ):
lowerCamelCase__: int =requests.get(url + F"""&page={i + 2}""" , headers=__a ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCAmelCase_ ( __a , __a=None ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Tuple =None
if token is not None:
lowerCamelCase__: Optional[Any] ={"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    lowerCamelCase__: Optional[Any] =F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
lowerCamelCase__: List[Any] =requests.get(__a , headers=__a ).json()
lowerCamelCase__: Optional[Any] ={}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
lowerCamelCase__: Dict =math.ceil((result["total_count"] - 100) / 100 )
for i in range(__a ):
lowerCamelCase__: Dict =requests.get(url + F"""&page={i + 2}""" , headers=__a ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: List[Any] =None
if token is not None:
lowerCamelCase__: int ={"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
lowerCamelCase__: Tuple =requests.get(__a , headers=__a , allow_redirects=__a )
lowerCamelCase__: List[Any] =result.headers["Location"]
lowerCamelCase__: str =requests.get(__a , allow_redirects=__a )
lowerCamelCase__: Optional[int] =os.path.join(__a , F"""{artifact_name}.zip""" )
with open(__a , "wb" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( __a , __a=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =[]
lowerCamelCase__: Tuple =[]
lowerCamelCase__: int =None
with zipfile.ZipFile(__a ) as z:
for filename in z.namelist():
if not os.path.isdir(__a ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__a ) as f:
for line in f:
lowerCamelCase__: int =line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCamelCase__: Optional[int] =line[: line.index(": " )]
lowerCamelCase__: Union[str, Any] =line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
lowerCamelCase__: int =line[len("FAILED " ) :]
failed_tests.append(__a )
elif filename == "job_name.txt":
lowerCamelCase__: Tuple =line
if len(__a ) != len(__a ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(__a )} for `errors` """
F"""and {len(__a )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
" problem." )
lowerCamelCase__: int =None
if job_name and job_links:
lowerCamelCase__: Any =job_links.get(__a , __a )
# A list with elements of the form (line of error, error, failed test)
lowerCamelCase__: int =[x + [y] + [job_link] for x, y in zip(__a , __a )]
return result
def lowerCAmelCase_ ( __a , __a=None ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =[]
lowerCamelCase__: int =[os.path.join(__a , __a ) for p in os.listdir(__a ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__a , job_links=__a ) )
return errors
def lowerCAmelCase_ ( __a , __a=None ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Any =Counter()
counter.update([x[1] for x in logs] )
lowerCamelCase__: str =counter.most_common()
lowerCamelCase__: List[Any] ={}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCamelCase__: Optional[int] ={"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    lowerCamelCase__: Any =dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCAmelCase_ ( __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: str =test.split("::" )[0]
if test.startswith("tests/models/" ):
lowerCamelCase__: Tuple =test.split("/" )[2]
else:
lowerCamelCase__: List[str] =None
return test
def lowerCAmelCase_ ( __a , __a=None ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =[(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCamelCase__: int =[x for x in logs if x[2] is not None]
lowerCamelCase__: int ={x[2] for x in logs}
lowerCamelCase__: Optional[Any] ={}
for test in tests:
lowerCamelCase__: List[Any] =Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCamelCase__: List[str] =counter.most_common()
lowerCamelCase__: Any ={error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCamelCase__: List[str] =sum(error_counts.values() )
if n_errors > 0:
lowerCamelCase__: str ={"count": n_errors, "errors": error_counts}
    lowerCamelCase__: int =dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] ="| no. | error | status |"
lowerCamelCase__: Dict ="|-:|:-|:-|"
lowerCamelCase__: str =[header, sep]
for error in reduced_by_error:
lowerCamelCase__: Optional[int] =reduced_by_error[error]["count"]
lowerCamelCase__: int =F"""| {count} | {error[:100]} | |"""
lines.append(__a )
return "\n".join(__a )
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[int] ="| model | no. of errors | major error | count |"
lowerCamelCase__: int ="|-:|-:|-:|-:|"
lowerCamelCase__: Union[str, Any] =[header, sep]
for model in reduced_by_model:
lowerCamelCase__: List[str] =reduced_by_model[model]["count"]
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =list(reduced_by_model[model]["errors"].items() )[0]
lowerCamelCase__: Dict =F"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(__a )
return "\n".join(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__A = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A = get_job_links(args.workflow_run_id, token=args.token)
__A = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A = k.find(" / ")
__A = k[index + len(" / ") :]
__A = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A = reduce_by_error(errors)
__A = reduce_by_model(errors)
__A = make_github_table(reduced_by_error)
__A = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
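# Illustrative invocation (the script filename and run id are assumptions):
#   python get_ci_error_statistics.py \
#       --workflow_run_id 2990018394 \
#       --output_dir ci_results \
#       --token <a GitHub token with actions:read permission>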
| 273
|
from math import pow
def backtrack ( needed_sum , power , current_number , current_sum , solutions_count , ) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def lowerCAmelCase_ ( needed_sum , power ) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10." )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
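    # Illustrative check (an example, not from the original file): 13 has
    # exactly one representation as a sum of unique squares (4 + 9).
    print(lowerCAmelCase_(13 , 2))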
| 273
| 1
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
lowercase , lowercase , lowercase , lowercase :Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase :int = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
lowercase :Optional[Any] = config_class.from_json_file(lowerCamelCase )
lowercase :str = True
lowercase :Dict = True
print(F"Building TensorFlow model from configuration: {config}" )
lowercase :Optional[Any] = model_class(lowerCamelCase )
# Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase :Tuple = cached_file(
lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase :List[Any] = load_pytorch_checkpoint_in_tfa_model(lowerCamelCase, lowerCamelCase )
if compare_with_pt_model:
lowercase :List[str] = tf_model(tf_model.dummy_inputs, training=lowerCamelCase ) # build the network
lowercase :Optional[int] = torch.load(lowerCamelCase, map_location="cpu" )
lowercase :Dict = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase, config=lowerCamelCase, state_dict=lowerCamelCase )
with torch.no_grad():
lowercase :List[Any] = pt_model(**pt_model.dummy_inputs )
lowercase :Optional[Any] = pto[0].numpy()
lowercase :Tuple = tfo[0].numpy()
lowercase :Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F"Max absolute difference between models outputs {diff}" )
assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}"
# Save the TensorFlow model
print(F"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(lowerCamelCase, save_format="h5" )
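# A minimal hedged usage sketch (not part of the original script; the shortcut
# names and output path below are illustrative assumptions -- any key of
# MODEL_CLASSES plus a valid checkpoint/config shortcut works the same way):
#
# convert_pt_checkpoint_to_tf(
#     model_type="bert",
#     pytorch_checkpoint_path="bert-base-uncased",
#     config_file="bert-base-uncased",
#     tf_dump_path="./bert-base-uncased-tf_model.h5",
#     compare_with_pt_model=True,  # checks max |pt - tf| <= 2e-2 before saving
# )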
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, ):
if args_model_type is None:
lowercase :Optional[Any] = list(MODEL_CLASSES.keys() )
else:
lowercase :Optional[Any] = [args_model_type]
for j, model_type in enumerate(lowerCamelCase, start=1 ):
print("=" * 100 )
print(F" Converting model type {j}/{len(lowerCamelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
lowercase , lowercase , lowercase , lowercase , lowercase :Union[str, Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase :Union[str, Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase :Optional[int] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(lowerCamelCase, lowerCamelCase ), start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
lowercase :Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
F" Converting checkpoint {i}/{len(lowerCamelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase :Optional[int] = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
else:
lowercase :str = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase :Optional[int] = cached_file(lowerCamelCase, lowerCamelCase, force_download=not use_cached_models )
else:
lowercase :Any = model_shortcut_name
if os.path.isfile(lowerCamelCase ):
lowercase :Tuple = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=lowerCamelCase, pytorch_checkpoint_path=lowerCamelCase, config_file=lowerCamelCase, tf_dump_path=os.path.join(lowerCamelCase, model_shortcut_name + "-tf_model.h5" ), compare_with_pt_model=lowerCamelCase, )
if remove_cached_files:
os.remove(lowerCamelCase )
os.remove(lowerCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
_UpperCAmelCase : Dict = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
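# Illustrative shell invocation (a hedged sketch; the script filename and paths
# are assumptions, not taken from this file):
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path ./tf_dumps \
#       --model_type bert \
#       --compare_with_pt_model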
| 236
|
import os
import pytest
from attr import dataclass
_UpperCAmelCase : List[str] = "us-east-1" # defaults region
@dataclass
class __lowerCAmelCase :
_a = 42
_a = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
_a = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5500,
}
_a = {**hyperparameters, '''max_steps''': 1000}
@property
def SCREAMING_SNAKE_CASE ( self: str ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def SCREAMING_SNAKE_CASE ( self: Dict ):
return F"{self.framework}-transfromers-test"
@property
def SCREAMING_SNAKE_CASE ( self: Any ):
return F"./tests/sagemaker/scripts/{self.framework}"
@property
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 236
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCamelCase__ = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
_UpperCamelCase : Optional[int] = self.transformer_dir
shutil.copy(
os.path.join(__a , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
_UpperCamelCase : str = "src/transformers"
shutil.rmtree(self.transformer_dir )
def __SCREAMING_SNAKE_CASE ( self : int , __a : Optional[Any] , __a : Dict , __a : Optional[Any] , __a : Optional[Any]=None ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_UpperCamelCase : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_UpperCamelCase : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase : List[str] = black.format_str(__a , mode=__a )
_UpperCamelCase : Dict = os.path.join(self.transformer_dir , "new_code.py" )
with open(__a , "w" , newline="\n" ) as f:
f.write(__a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__a )
with open(__a , "r" ) as f:
self.assertTrue(f.read() , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __a , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __a ) , )
# Copy consistency with a really long name
_UpperCamelCase : str = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("Bert" , __a , __a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __a , overwrite_result=re.sub("Bert" , "TestModel" , __a ) , )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
_UpperCamelCase : Union[str, Any] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_UpperCamelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_UpperCamelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCamelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_UpperCamelCase, _UpperCamelCase : List[str] = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
self.assertFalse(__a )
self.assertEqual(__a , __a )
_UpperCamelCase, _UpperCamelCase : List[Any] = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__a )
_UpperCamelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_UpperCamelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCamelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCamelCase, _UpperCamelCase : Optional[Any] = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__a , __a )
| 366
|
"""simple docstring"""
lowerCamelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = [False] * len(lowercase_ )
_UpperCamelCase : Dict = [s]
_UpperCamelCase : List[str] = True
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase_ )
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = u
return visited[t]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = [-1] * (len(lowercase_ ))
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : int = float("Inf" )
_UpperCamelCase : Optional[Any] = sink
while s != source:
# Find the minimum value in select path
_UpperCamelCase : List[Any] = min(lowercase_ ,graph[parent[s]][s] )
_UpperCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_UpperCamelCase : Union[str, Any] = sink
while v != source:
_UpperCamelCase : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : Dict = parent[v]
for i in range(len(lowercase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
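# Hedged companion check (not part of the original): `mincut` returns the
# saturated edges, and for this classic CLRS network their original
# capacities sum to the max-flow value, 23. `mincut` mutates the graph it
# receives, so fresh copies of the capacities are used here.
original_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
cut_edges = mincut([row[:] for row in original_graph], source=0, sink=5)
print(sum(original_graph[u][v] for u, v in cut_edges))  # expected: 23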
| 310
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : str = 4.0 # Largest value for most examples
lowercase__ : Any = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
lowercase__ , lowercase__ : int = output_range
if clip:
lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
lowercase__ , lowercase__ : Tuple = input_range
lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
lowercase__ : Optional[Any] = input_tokens > 0
lowercase__ , lowercase__ : int = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
lowercase__ : Union[str, Any] = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : str = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ : str = ones
lowercase__ : str = self.scale_features(
lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
lowercase__ : str = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__ : List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[int] = self.decode(
encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
lowercase__ : List[str] = mel[:1]
lowercase__ : Optional[int] = mel.cpu().float().numpy()
lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ )
logger.info("Generated segment" , lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ : Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
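# Hedged standalone check (not part of the original pipeline): the two scaling
# helpers above are mutually inverse affine maps over [min_value, max_value].
def _scale(features, lo=-1.0, hi=1.0, min_value=math.log(1e-5), max_value=4.0):
    zero_one = (features - min_value) / (max_value - min_value)
    return zero_one * (hi - lo) + lo

def _unscale(outputs, lo=-1.0, hi=1.0, min_value=math.log(1e-5), max_value=4.0):
    zero_one = (outputs - lo) / (hi - lo)
    return zero_one * (max_value - min_value) + min_value

_x = torch.linspace(math.log(1e-5), 4.0, steps=5)
assert torch.allclose(_unscale(_scale(_x)), _x)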
| 87
|
UpperCamelCase = [0, 2, 4, 6, 8]
UpperCamelCase = [1, 3, 5, 7, 9]
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowercase__ : str = 0
for digit in range(10):
lowercase__ : str = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase)
return result
lowercase__ : Dict = 0
for digita in range(10):
lowercase__ : int = digita
if (remainder + digita) % 2 == 0:
lowercase__ : Optional[Any] = ODD_DIGITS
else:
lowercase__ : str = EVEN_DIGITS
for digita in other_parity_digits:
lowercase__ : List[str] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , )
return result
def lowercase_ ( _lowerCamelCase : int = 9):
lowercase__ : Tuple = 0
for length in range(1 , max_power + 1):
result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase)
return result
if __name__ == "__main__":
print(f"{solution() = }")
| 87
| 1
|
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100 ) ->int:
'''simple docstring'''
a : Dict = sum(i * i for i in range(1 , n + 1 ) )
a : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
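# Hedged closed-form variant (not part of the original), using the identities
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2640  # worked example from the problem statement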
| 358
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]:
'''simple docstring'''
a : int = HfArgumentParser(_lowercase )
a : int = parser.parse_args_into_dataclasses()[0]
a : Any = TensorFlowBenchmark(args=_lowercase )
try:
a : Tuple = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a : Optional[Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a : Tuple = " ".join(str(_lowercase ).split(" " )[:-1] )
a : Any = ""
a : Any = eval(str(_lowercase ).split(" " )[-1] )
a : List[Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowercase )
if len(_lowercase ) > 0:
a : Tuple = full_error_msg + begin_error_msg + str(_lowercase )
raise ValueError(_lowercase )
benchmark.run()
if __name__ == "__main__":
main()
| 79
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class _UpperCAmelCase:
def __init__( self) -> None:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = 0
def UpperCAmelCase ( self) -> bool:
'''simple docstring'''
return self.head == self.tail
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
self.data.append(__a)
_UpperCamelCase = self.tail + 1
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.data[self.head]
_UpperCamelCase = self.head + 1
return ret
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.tail - self.head
def UpperCAmelCase ( self) -> None:
'''simple docstring'''
print(self.data)
print('''**************''')
print(self.data[self.head : self.tail])
class _UpperCAmelCase:
def __init__( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = data
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.data
def UpperCAmelCase ( self) -> MyNode | None:
'''simple docstring'''
return self.left
def UpperCAmelCase ( self) -> MyNode | None:
'''simple docstring'''
return self.right
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.height
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = data
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = node
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = node
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = height
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
if a > b:
return a
return b
def lowerCamelCase__ ( __snake_case ) -> MyNode:
"""simple docstring"""
print('''left rotation node:''', node.get_data() )
_UpperCamelCase = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__snake_case )
_UpperCamelCase = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__snake_case )
_UpperCamelCase = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(__snake_case )
return ret
def lowerCamelCase__ ( __snake_case ) -> MyNode:
"""simple docstring"""
print('''right rotation node:''', node.get_data() )
_UpperCamelCase = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__snake_case )
_UpperCamelCase = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__snake_case )
_UpperCamelCase = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1
ret.set_height(__snake_case )
return ret
def lowerCamelCase__ ( __snake_case ) -> MyNode:
"""simple docstring"""
_UpperCamelCase = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__snake_case ) )
return right_rotation(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> MyNode:
"""simple docstring"""
_UpperCamelCase = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__snake_case ) )
return left_rotation(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> MyNode | None:
"""simple docstring"""
if node is None:
return MyNode(__snake_case )
if data < node.get_data():
node.set_left(insert_node(node.get_left(), __snake_case ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an imbalance was detected
_UpperCamelCase = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_UpperCamelCase = right_rotation(__snake_case )
else:
_UpperCamelCase = lr_rotation(__snake_case )
else:
node.set_right(insert_node(node.get_right(), __snake_case ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_UpperCamelCase = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_UpperCamelCase = rl_rotation(__snake_case )
else:
_UpperCamelCase = left_rotation(__snake_case )
_UpperCamelCase = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1
node.set_height(__snake_case )
return node
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
while True:
_UpperCamelCase = root.get_right()
if right_child is None:
break
_UpperCamelCase = right_child
return root.get_data()
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
while True:
_UpperCamelCase = root.get_left()
if left_child is None:
break
_UpperCamelCase = left_child
return root.get_data()
def lowerCamelCase__ ( __snake_case, __snake_case ) -> MyNode | None:
"""simple docstring"""
_UpperCamelCase = root.get_left()
_UpperCamelCase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_UpperCamelCase = get_left_most(__snake_case )
root.set_data(__snake_case )
root.set_right(del_node(__snake_case, __snake_case ) )
elif left_child is not None:
_UpperCamelCase = left_child
elif right_child is not None:
_UpperCamelCase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(__snake_case, __snake_case ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__snake_case, __snake_case ) )
if get_height(__snake_case ) - get_height(__snake_case ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_UpperCamelCase = left_rotation(__snake_case )
else:
_UpperCamelCase = rl_rotation(__snake_case )
elif get_height(__snake_case ) - get_height(__snake_case ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_UpperCamelCase = right_rotation(__snake_case )
else:
_UpperCamelCase = lr_rotation(__snake_case )
_UpperCamelCase = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1
root.set_height(__snake_case )
return root
class _UpperCAmelCase:
def __init__( self) -> None:
'''simple docstring'''
_UpperCamelCase = None
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return get_height(self.root)
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
print('''insert:''' + str(__a))
_UpperCamelCase = insert_node(self.root , __a)
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
print('''delete:''' + str(__a))
if self.root is None:
print('''Tree is empty!''')
return
_UpperCamelCase = del_node(self.root , __a)
def __str__( self , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
_UpperCamelCase = ''''''
_UpperCamelCase = MyQueue()
q.push(self.root)
_UpperCamelCase = self.get_height()
if layer == 0:
return output
_UpperCamelCase = 0
while not q.is_empty():
_UpperCamelCase = q.pop()
_UpperCamelCase = ''' ''' * int(math.pow(2 , layer - 1))
output += space
if node is None:
output += "*"
q.push(__a)
q.push(__a)
else:
output += str(node.get_data())
q.push(node.get_left())
q.push(node.get_right())
output += space
_UpperCamelCase = cnt + 1
for i in range(1_00):
if cnt == math.pow(2 , __a) - 1:
_UpperCamelCase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_a = AVLtree()
_a = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
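# Hedged balance check (not part of the original): ascending insertions give a
# degenerate height-n chain in a plain BST, but the rotations above keep this
# tree within the AVL height bound.
balanced = AVLtree()
for value in range(15):
    balanced.insert(value)
assert balanced.get_height() == 4  # 15 nodes form a perfect tree (1-based height)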
| 194
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=5 ) -> Union[str, Any]:
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
_UpperCamelCase = torch.tensor(tokenizer.encode(__snake_case, add_special_tokens=__snake_case ) ).unsqueeze(0 ) # Batch size 1
_UpperCamelCase = model(__snake_case )[0] # The last hidden-state is the first element of the output tuple
_UpperCamelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_UpperCamelCase = logits[0, masked_index, :]
_UpperCamelCase = logits.softmax(dim=0 )
_UpperCamelCase , _UpperCamelCase = prob.topk(k=__snake_case, dim=0 )
_UpperCamelCase = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__snake_case ) )] )
_UpperCamelCase = tokenizer.mask_token
_UpperCamelCase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
_UpperCamelCase = predicted_token_bpe.replace('''\u2581''', ''' ''' )
if " {0}".format(__snake_case ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(__snake_case ), __snake_case ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__snake_case, __snake_case ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_a = CamembertTokenizer.from_pretrained("""camembert-base""")
_a = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
_a = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
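# Hedged alternative (not part of the original): the high-level pipeline API
# reproduces the same top-k masked-word completion in a few lines.
from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
for prediction in camembert_fill_mask("Le camembert est <mask> :)", top_k=3):
    print(prediction["sequence"], prediction["score"])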
| 194
| 1
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 65
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = StableDiffusionLDMaDPipeline
snake_case = TEXT_TO_IMAGE_PARAMS
snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self )->str:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
A_ : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
A_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : str = CLIPTextModel(_SCREAMING_SNAKE_CASE )
A_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->Dict:
'''simple docstring'''
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->int:
'''simple docstring'''
A_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : List[Any] = self.get_dummy_components()
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : Dict = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Tuple = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[Any] = output.rgb, output.depth
A_ : List[Any] = rgb[0, -3:, -3:, -1]
A_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A_ : Tuple = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
A_ : Union[str, Any] = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : List[str] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = 3 * [inputs['''prompt''']]
# forward
A_ : Optional[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[int] = output.rgb, output.depth
A_ : Tuple = rgb_slice_a[0, -3:, -3:, -1]
A_ : Optional[Any] = depth_slice_a[0, -3:, -1]
A_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Dict = 3 * [inputs.pop('''prompt''' )]
A_ : Tuple = ldmad_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
A_ : Dict = text_inputs['''input_ids'''].to(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = ldmad_pipe.text_encoder(_SCREAMING_SNAKE_CASE )[0]
A_ : Optional[int] = prompt_embeds
# forward
A_ : Optional[int] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Any = output.rgb, output.depth
A_ : Any = rgb_slice_a[0, -3:, -3:, -1]
A_ : str = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : List[str] = self.get_dummy_components()
A_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
A_ : int = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : str = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = '''french fries'''
A_ : Optional[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[int] = output.rgb, output.depth
A_ : Optional[Any] = rgb[0, -3:, -3:, -1]
A_ : int = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A_ : int = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
A_ : Any = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 )->Optional[int]:
'''simple docstring'''
A_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : str = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
A_ : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
A_ : int = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->str:
'''simple docstring'''
A_ : List[str] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
A_ : Optional[Any] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : int = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : Dict = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Union[str, Any] = output.rgb, output.depth
A_ : int = rgb[0, -3:, -3:, -1].flatten()
A_ : Union[str, Any] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
A_ : Tuple = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
A_ : Tuple = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 )->int:
'''simple docstring'''
A_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : Any = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
A_ : str = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : List[Any] = output.rgb, output.depth
A_ : int = 0.4_9_5_5_8_6
A_ : Union[str, Any] = 0.3_3_7_9_5_5_1_5
A_ : Optional[int] = 1_1_2.4_8_5_1_8
A_ : Optional[Any] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Any = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : Tuple = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[Any] = output.rgb, output.depth
A_ : Tuple = 0.4_1_9_4_1_2_7
A_ : Union[str, Any] = 0.3_5_3_7_5_5_8_6
A_ : Union[str, Any] = 0.5_6_3_8_5_0_2
A_ : List[Any] = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 65
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[Any] =logging.get_logger(__name__)
_A : int ={'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class _lowercase ( lowercase_ ):
a = """ctrl"""
a = ["""past_key_values"""]
a = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=246_534 , UpperCamelCase__: Union[str, Any]=256 , UpperCamelCase__: Optional[int]=1_280 , UpperCamelCase__: Any=8_192 , UpperCamelCase__: List[str]=48 , UpperCamelCase__: List[Any]=16 , UpperCamelCase__: Any=0.1 , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: str=1e-6 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ):
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : List[Any] = n_positions
lowerCamelCase__ : Optional[Any] = n_embd
lowerCamelCase__ : Any = n_layer
lowerCamelCase__ : List[str] = n_head
lowerCamelCase__ : str = dff
lowerCamelCase__ : List[str] = resid_pdrop
lowerCamelCase__ : Dict = embd_pdrop
lowerCamelCase__ : Optional[int] = layer_norm_epsilon
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : List[Any] = use_cache
super().__init__(**UpperCamelCase__ )
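# Hedged usage sketch against the real library class this snippet mirrors
# (transformers.CTRLConfig); the attribute_map above lets generic code read
# model-agnostic attribute names:
#
# from transformers import CTRLConfig
#
# config = CTRLConfig(n_embd=512, n_layer=12)
# assert config.hidden_size == 512          # resolved via attribute_map
# assert config.num_hidden_layers == 12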
| 41
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A , A )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
lowercase__ ,lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A , A , bias=A )
lowercase__ = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowercase__ = '''relu'''
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on the local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
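# Illustrative invocation (a hedged sketch; the script filename and paths are
# assumptions, not taken from this file):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart_dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned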
| 2
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
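# A minimal, self-contained sketch of the lazy-import pattern used above.
# `_LazyDemo` and its symbol map are illustrative stand-ins (not the real
# `_LazyModule`): attribute access triggers the underlying import, so importing
# the package itself stays cheap until a symbol is actually needed.
import importlib
import types
class _LazyDemo(types.ModuleType):
    def __init__(self, name, symbol_to_module):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module
    def __getattr__(self, name):
        # import the owning module on first access, then cache the attribute
        module = importlib.import_module(self._symbol_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # later lookups skip __getattr__
        return value
if __name__ == "__main__":
    demo = _LazyDemo("demo", {"sqrt": "math", "dumps": "json"})
    print(demo.sqrt(9.0))  # -> 3.0; `math` is only imported at this point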
| 365
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_t_bar function over time."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
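# Sanity check for the helper above: the cosine schedule starts with tiny betas
# and grows toward the max_beta cap:
#   >>> betas = betas_for_alpha_bar(1000)
#   >>> bool(betas[0] < betas[-1] <= 0.999)
#   True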
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler with Heun steps for discrete beta schedules (Algorithm 2 of Karras et al., 2022)."""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,
        beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace", steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp')
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """Scales the model input by `1 / sqrt(sigma**2 + 1)` to match the K-diffusion algorithm."""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith('mps'):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        # the scheduler is in "first order mode" whenever no dt is stored
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Propagates the sample from the current timestep to the previous one with a Heun step."""
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""")
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
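# Minimal usage sketch for the scheduler above. The all-zeros "model output" is a
# stand-in for a real denoising network's epsilon prediction, and the sample shape
# is arbitrary. Heun's method uses two model evaluations per step, which is why
# set_timesteps stores most timesteps twice (via repeat_interleave).
if __name__ == "__main__":
    scheduler = HeunDiscreteScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # hypothetical UNet output
        sample = scheduler.step(model_output, t, sample).prev_sample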
| 5
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None,
                                decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
                                cross_attn_head_mask=None):
    '''Builds the model input dict, filling in default masks where none are given.'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__UpperCAmelCase : Dict = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
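# The integration test above is gated behind @slow: in the transformers test
# suite such tests only run when RUN_SLOW is set, e.g. (the test-file path is an
# assumption and may differ between library versions):
#   RUN_SLOW=1 pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k test_batch_generation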
| 17
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17
| 1
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class FlaxLogitsProcessorList(list):
    """A list of processors/warpers that are applied to the scores in order via __call__."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys())} for '''
                        f'''{processor.__class__} are passed to the logits processor.''')
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warper for temperature (exponential scaling of the logits distribution)."""
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''')
        self.temperature = temperature
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Warper that performs top-p (nucleus) filtering."""
    def __init__(self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''')
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Warper that performs top-k filtering."""
    def __init__(self, top_k: int, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''')
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token id to be sampled as the first generated token."""
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token id to be sampled as the last token when max_length is reached."""
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Enforces a minimum length by setting the EOS probability to -inf below `min_length`."""
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''')
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens as soon as generation starts (at `begin_index`)."""
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index
    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)
    def __call__(self, input_ids, scores, cur_len: int):
        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Maps generation indices to token ids that are forced before sampling."""
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is at or past the end of force_token_array, do nothing.
            lambda: scores,
            # Otherwise, a token may be forced at this position.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                lambda: scores, ), )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific processor that constrains where timestamp tokens may appear."""
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))
        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float('inf')), scores_k.at[: self.eos_token_id].set(-float('inf')), ), scores_k, )
        scores = jax.vmap(handle_pairs)(input_ids, scores)
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float('inf')), scores, )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)
        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float('inf')), scores_k, )
        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
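# A small sketch of chaining the processors above on a dummy batch (the values
# are arbitrary; `cur_len` only matters for the length-aware processors):
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
#   )
#   scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
#   input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
#   filtered = processors(input_ids, scores, 1)  # all but the top-2 logits become filter_value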
| 292
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F'''accelerate configuration saved at {config_file}''')
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
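# Typical invocations of this command via the accelerate CLI:
#   accelerate config                              # interactive prompts, default save location
#   accelerate config --config_file ./my_config.yaml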
| 292
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    """input: positive integer 'number'; returns True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: returns all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Returns all primes between 2 and n using trial division."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to n+1;
    # if a number is prime it is appended to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Returns the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number in (0, 1):
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Returns the greatest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Returns the smallest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def is_even(number: int) -> bool:
    """Returns True if 'number' is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Returns True if 'number' is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach's conjecture: returns two primes whose sum equals the even 'number' (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, for breaking out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and the sum of its elements must equal 'number'"
    return ans
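# Example: goldbach(28) returns [5, 23] -- both prime, and 5 + 23 == 28.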
def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: returns the greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Returns the least common multiple (German: kgV) of two positive ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run on to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Returns all primes strictly between the primes p_number_1 and p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n: int) -> list:
    """Returns all divisors of n (including 1 and n itself)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Returns True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
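# Example: simplify_fraction(10, 20) returns (1, 2), since gcd(10, 20) == 10.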
def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Returns the n-th Fibonacci term (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
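# Quick, self-contained checks of the helpers above (expected values worked out by hand):
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(1)
    assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert goldbach(28) == [5, 23]
    assert gcd(48, 36) == 12 and kg_v(4, 6) == 12
    assert simplify_fraction(10, 20) == (1, 2)
    assert [fib(k) for k in range(6)] == [1, 1, 2, 3, 5, 8]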
| 252
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    """Converts a byte count to megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks CUDA memory allocated within its block."""
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased",
                    n_train: int = 320, n_val: int = 160):
    """Creates train and eval dataloaders on truncated splits of GLUE/MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        """glue""", """mrpc""", split={"""train""": f'train[:{n_train}]', """validation""": f'validation[:{n_val}]'})
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
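# A minimal sketch of launching this script (assumed invocation, not part of the
# original file; the script filename is a placeholder). Single process:
#
#   python peak_memory_tracking.py --model_name_or_path bert-base-cased --output_dir .
#
# Multi-GPU or DeepSpeed runs typically go through `accelerate config` followed by:
#
#   accelerate launch peak_memory_tracking.py --num_epochs 1 --peak_memory_upper_bound 4000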
| 252
| 1
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
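# Note: the manual `max_id` pagination above can also be expressed with tweepy's
# Cursor helper. A minimal sketch (assumes the same authenticated `api` object;
# the screen name is illustrative):
#
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#       print(tweet.id_str, tweet.text)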
| 17
|
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
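# Worked example of the padding-and-grouping scheme above (assumed usage of the
# reconstructed `bin_to_octal` name): "1111" is left-padded to "001111", split
# into ["001", "111"], and each 3-bit group becomes one octal digit:
#
#   >>> bin_to_octal("1111")
#   '17'
#   >>> bin_to_octal("101010")
#   '52'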
| 17
| 1
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
| 26
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer) -> None:
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator=None, latents=None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: str = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 26
| 1
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False):
    """Quantize a model's linear layers with bitsandbytes and dispatch it on the available devices."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager.")
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            " We move the model to cuda.")
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ")
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes)
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload)
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    """Build (or validate) the device map used to dispatch the quantized model."""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'.")
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            })
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs)
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """)
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` in the model (except skipped modules) with a bitsandbytes layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Recursive helper for `replace_with_bnb_layers`; returns the model and whether anything was replaced."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold)
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type)
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Return the module names (tied weights and the output head) that should stay in full precision."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
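# Minimal end-to-end sketch of the entry point above (assumed usage; the model
# class and weights path are placeholders):
#
#   from accelerate import init_empty_weights
#
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model built without allocating weights
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   quantized_model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )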
| 353
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Read YOLO-style label files and collect the matching image paths."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 315
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling-factorial product u * (u-1) * ... * (u-p+1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
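# Worked example of the forward-difference scheme above (assumed inputs): with
# x = [0, 1, 2, 3], y-values [1, 2, 4, 8] and value = 1.5, we get u = 1.5 and the
# difference columns Δy0 = 1, Δ²y0 = 1, Δ³y0 = 1, so the Newton forward sum is
# 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125.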
| 309
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
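# Minimal sketch of using this config (assumed usage; the causal-LM class name
# follows the standard transformers naming for this architecture):
#
#   from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseForCausalLM
#
#   config = GPTNeoXJapaneseConfig(num_hidden_layers=4, hidden_size=256)
#   model = GPTNeoXJapaneseForCausalLM(config)  # randomly initialized, for smoke tests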
| 371
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250
| 0
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 242
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
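# Minimal usage sketch (assumed; PipelineTool instances are callable, and the
# image path below is a placeholder):
#
#   from PIL import Image
#
#   captioner = ImageCaptioningTool()
#   print(captioner(Image.open("photo.jpg")))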
| 242
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 350
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename original RWKV checkpoint keys to the names expected by transformers."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size])
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
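# Example invocation (assumed; the repo and checkpoint names are placeholders
# for a real RWKV checkpoint on the Hub):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf --size 169M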
| 5
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ):
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith("mps" ):
_A = torch.manual_seed(__UpperCAmelCase )
else:
_A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_A = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.get_dummy_components()
for name, module in components.items():
if hasattr(__UpperCAmelCase , "half" ):
_A = module.half()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 79
| 0
|
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
A_ = True
from torch.cuda.amp import autocast
A_ = logging.getLogger(__name__)
def list_field ( default : List[Any]=None, metadata : Optional[int]=None ) ->Tuple:
    return field(default_factory=lambda: default, metadata=metadata )
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
snake_case_ = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
snake_case_ = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
snake_case_ = field(
default=0.1 , metadata={
        'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
snake_case_ = field(
    default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , )
snake_case_ = field(
default=0.0_5 , metadata={
'help': (
            'Probability of each feature vector along the time axis to be chosen as the start of the vector '
            'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
            'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
snake_case_ = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
snake_case_ = field(
default='train+validation' , metadata={
        'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
snake_case_ = field(
default=UpperCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
snake_case_ = field(
default=UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
snake_case_ = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = 42
snake_case_ = True
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
def __call__( self : int , snake_case : List[Dict[str, Union[List[int], torch.Tensor]]] ):
'''simple docstring'''
A__ : Any = [{"""input_values""": feature["""input_values"""]} for feature in features]
A__ : Optional[int] = [{"""input_ids""": feature["""labels"""]} for feature in features]
A__ : Optional[Any] = self.processor.pad(
snake_case , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
A__ : List[str] = self.processor.pad(
labels=snake_case , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
A__ : Optional[Any] = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
A__ : Dict = labels
return batch
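# Editorial note (illustrative): CTC models in transformers ignore label id
# -100 when computing the loss, so the masking above keeps padded label slots
# from contributing. For a batch padded to length 4 (pad id shown as P):
#     raw labels:    [12,  7,  P,    P]
#     after masking: [12,  7, -100, -100]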
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
def _UpperCamelCase ( self : Optional[Any] , snake_case : nn.Module , snake_case : Dict[str, Union[torch.Tensor, Any]] ):
'''simple docstring'''
model.train()
A__ : Optional[int] = self._prepare_inputs(snake_case )
if self.use_amp:
with autocast():
A__ : Optional[Any] = self.compute_loss(snake_case , snake_case )
else:
A__ : str = self.compute_loss(snake_case , snake_case )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
A__ : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A__ : Dict = loss.sum() / (inputs["""labels"""] >= 0).sum()
else:
                raise ValueError(F'{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
A__ : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(snake_case ).backward()
elif self.use_apex:
with amp.scale_loss(snake_case , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(snake_case )
else:
loss.backward()
return loss.detach()
def main ( ) ->Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ : int = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A__ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""", UpperCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
A__ : List[Any] = datasets.load_dataset(
"""common_voice""", data_args.dataset_config_name, split=data_args.train_split_name )
A__ : Optional[int] = datasets.load_dataset("""common_voice""", data_args.dataset_config_name, split="""test""" )
# Create and save tokenizer
A__ : List[str] = f'[{"".join(data_args.chars_to_ignore )}]'
def remove_special_characters(UpperCAmelCase__ : Dict ):
A__ : Optional[Any] = re.sub(UpperCAmelCase__, """""", batch["""sentence"""] ).lower() + """ """
return batch
A__ : Any = train_dataset.map(UpperCAmelCase__, remove_columns=["""sentence"""] )
A__ : List[Any] = eval_dataset.map(UpperCAmelCase__, remove_columns=["""sentence"""] )
def extract_all_chars(UpperCAmelCase__ : Dict ):
A__ : Any = """ """.join(batch["""text"""] )
A__ : Optional[Any] = list(set(UpperCAmelCase__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
A__ : str = train_dataset.map(
UpperCAmelCase__, batched=UpperCAmelCase__, batch_size=-1, keep_in_memory=UpperCAmelCase__, remove_columns=train_dataset.column_names, )
A__ : int = train_dataset.map(
UpperCAmelCase__, batched=UpperCAmelCase__, batch_size=-1, keep_in_memory=UpperCAmelCase__, remove_columns=eval_dataset.column_names, )
A__ : int = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
A__ : str = {v: k for k, v in enumerate(UpperCAmelCase__ )}
A__ : Optional[Any] = vocab_dict[""" """]
del vocab_dict[" "]
A__ : List[str] = len(UpperCAmelCase__ )
A__ : Any = len(UpperCAmelCase__ )
with open("""vocab.json""", """w""" ) as vocab_file:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ : List[Any] = WavaVecaCTCTokenizer(
"""vocab.json""", unk_token="""[UNK]""", pad_token="""[PAD]""", word_delimiter_token="""|""", )
A__ : int = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0.0, do_normalize=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__ )
A__ : Optional[Any] = WavaVecaProcessor(feature_extractor=UpperCAmelCase__, tokenizer=UpperCAmelCase__ )
A__ : int = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="""mean""", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
A__ : Tuple = min(len(UpperCAmelCase__ ), data_args.max_train_samples )
A__ : Optional[int] = train_dataset.select(range(UpperCAmelCase__ ) )
if data_args.max_val_samples is not None:
A__ : List[Any] = eval_dataset.select(range(data_args.max_val_samples ) )
A__ : int = torchaudio.transforms.Resample(4_8_0_0_0, 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCAmelCase__ : Any ):
A__ : Dict = torchaudio.load(batch["""path"""] )
A__ : Union[str, Any] = resampler(UpperCAmelCase__ ).squeeze().numpy()
A__ : Any = 1_6_0_0_0
A__ : Any = batch["""text"""]
return batch
A__ : Any = train_dataset.map(
UpperCAmelCase__, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
A__ : Optional[Any] = eval_dataset.map(
UpperCAmelCase__, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(UpperCAmelCase__ : List[Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), f'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
A__ : Any = processor(
audio=batch["""speech"""], text=batch["""target_text"""], sampling_rate=batch["""sampling_rate"""][0] )
batch.update(UpperCAmelCase__ )
return batch
A__ : Optional[int] = train_dataset.map(
UpperCAmelCase__, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=UpperCAmelCase__, num_proc=data_args.preprocessing_num_workers, )
A__ : int = eval_dataset.map(
UpperCAmelCase__, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=UpperCAmelCase__, num_proc=data_args.preprocessing_num_workers, )
# Metric
A__ : Optional[int] = datasets.load_metric("""wer""" )
def compute_metrics(UpperCAmelCase__ : List[str] ):
A__ : str = pred.predictions
A__ : List[Any] = np.argmax(UpperCAmelCase__, axis=-1 )
A__ : Dict = processor.tokenizer.pad_token_id
A__ : Optional[int] = processor.batch_decode(UpperCAmelCase__ )
# we do not want to group tokens when computing the metrics
A__ : Tuple = processor.batch_decode(pred.label_ids, group_tokens=UpperCAmelCase__ )
A__ : int = wer_metric.compute(predictions=UpperCAmelCase__, references=UpperCAmelCase__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
A__ : Any = DataCollatorCTCWithPadding(processor=UpperCAmelCase__, padding=UpperCAmelCase__ )
# Initialize our Trainer
A__ : Any = CTCTrainer(
model=UpperCAmelCase__, data_collator=UpperCAmelCase__, args=UpperCAmelCase__, compute_metrics=UpperCAmelCase__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A__ : Optional[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
A__ : Any = model_args.model_name_or_path
else:
A__ : int = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
A__ : List[Any] = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
A__ : int = train_result.metrics
A__ : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase__ )
)
A__ : List[Any] = min(UpperCAmelCase__, len(UpperCAmelCase__ ) )
trainer.log_metrics("""train""", UpperCAmelCase__ )
trainer.save_metrics("""train""", UpperCAmelCase__ )
trainer.save_state()
# Evaluation
A__ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ : Dict = trainer.evaluate()
A__ : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCAmelCase__ )
A__ : str = min(UpperCAmelCase__, len(UpperCAmelCase__ ) )
trainer.log_metrics("""eval""", UpperCAmelCase__ )
trainer.save_metrics("""eval""", UpperCAmelCase__ )
return results
if __name__ == "__main__":
main()
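# Hedged invocation sketch (the script filename is hypothetical; the flags
# follow the argument dataclasses and TrainingArguments used above):
#
#     python run_common_voice.py \
#         --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#         --dataset_config_name tr \
#         --output_dir ./wav2vec2-xlsr-tr \
#         --do_train --do_eval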
| 358
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
A__ : int = nn.Linear(3 , 4 )
A__ : Union[str, Any] = nn.BatchNormad(4 )
A__ : Union[str, Any] = nn.Linear(4 , 5 )
def _UpperCamelCase ( self : str , snake_case : List[str] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(snake_case ) ) )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , model.state_dict() )
A__ : List[str] = os.path.join(snake_case , """index.json""" )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A__ : List[str] = os.path.join(snake_case , F'{key}.dat' )
self.assertTrue(os.path.isfile(snake_case ) )
# TODO: add tests on the fact weights are properly loaded
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Union[str, Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A__ : str = torch.randn(2 , 3 , dtype=snake_case )
with TemporaryDirectory() as tmp_dir:
A__ : List[str] = offload_weight(snake_case , """weight""" , snake_case , {} )
A__ : Union[str, Any] = os.path.join(snake_case , """weight.dat""" )
self.assertTrue(os.path.isfile(snake_case ) )
self.assertDictEqual(snake_case , {"""weight""": {"""shape""": [2, 3], """dtype""": str(snake_case ).split(""".""" )[1]}} )
A__ : str = load_offloaded_weight(snake_case , index["""weight"""] )
self.assertTrue(torch.equal(snake_case , snake_case ) )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : str = ModelForTest()
A__ : Union[str, Any] = model.state_dict()
A__ : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" not in k}
A__ : List[Any] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Dict = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
A__ : int = {k: v for k, v in state_dict.items() if """weight""" in k}
A__ : Tuple = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
A__ : Optional[Any] = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(snake_case , snake_case )
# Duplicates are removed
A__ : int = OffloadedWeightsLoader(state_dict=snake_case , save_folder=snake_case )
# Every key is there with the right value
self.assertEqual(sorted(snake_case ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(snake_case , weight_map[key] ) )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
A__ : str = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1""": 0, """a.2""": 2} )
A__ : Dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
A__ : int = extract_submodules_state_dict(snake_case , ["""a.1""", """a.2"""] )
self.assertDictEqual(snake_case , {"""a.1.a""": 0, """a.2.a""": 2} )
| 296
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def a__ ( x , mu = 0.0 , sigma = 1.0 ) -> float:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
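# Quick sanity check (editorial addition): the standard normal density peaks
# at x = 0 with value 1 / sqrt(2 * pi) ≈ 0.3989.
assert abs(a__(0) - 1 / sqrt(2 * pi)) < 1e-12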
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class snake_case :
SCREAMING_SNAKE_CASE_ : Optional[Union[str, Path]] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : Optional[Dict] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Optional[Union[str, bool]] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : Optional[Dict] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
def lowercase_ ( self : str)-> "DownloadConfig":
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase__) for k, v in self.__dict__.items()})
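# Hedged usage sketch (the field names in this sample are obfuscated, so the
# attribute below is hypothetical; the deep-copy semantics follow the copy()
# method above):
#
#     base = DownloadConfig(download_desc="demo")
#     clone = base.copy()   # every attribute is deep-copied
#     # mutating a dict- or list-valued field on `clone` cannot leak into `base`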
| 217
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="attention" )-> Any:
"""simple docstring"""
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False )-> Any:
"""simple docstring"""
if split_mlp_wi:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
snake_case_ = (wi_a, wi_a)
else:
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
snake_case_ = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , *, SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[Any]:
"""simple docstring"""
snake_case_ = traverse_util.flatten_dict(variables['''target'''] )
snake_case_ = {'''/'''.join(SCREAMING_SNAKE_CASE ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case_ = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE )
snake_case_ = collections.OrderedDict()
# Shared embeddings.
snake_case_ = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_attention_layer_norm''' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''attention''' )
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (MLP).
snake_case_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_mlp_layer_norm''' )
snake_case_ , snake_case_ = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , SCREAMING_SNAKE_CASE )
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old[
'''encoder/relpos_bias/rel_embedding'''
].T
snake_case_ = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
snake_case_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_self_attention_layer_norm''' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''self_attention''' )
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 1 (Cross Attention).
snake_case_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_cross_attention_layer_norm''' )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''encoder_decoder_attention''' )
snake_case_ = layer_norm
snake_case_ = k.T
snake_case_ = o.T
snake_case_ = q.T
snake_case_ = v.T
# Block i, layer 2 (MLP).
snake_case_ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_mlp_layer_norm''' )
snake_case_ , snake_case_ = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , SCREAMING_SNAKE_CASE )
snake_case_ = layer_norm
if split_mlp_wi:
snake_case_ = wi[0].T
snake_case_ = wi[1].T
else:
snake_case_ = wi.T
snake_case_ = wo.T
snake_case_ = old['''decoder/decoder_norm/scale''']
snake_case_ = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case_ = old['''decoder/logits_dense/kernel'''].T
return new
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
snake_case_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case_ = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
snake_case_ = state_dict['''shared.weight''']
return state_dict
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
snake_case_ = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE )
snake_case_ = make_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False )-> Optional[Any]:
"""simple docstring"""
snake_case_ = TaConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case_ = TaEncoderModel(SCREAMING_SNAKE_CASE )
else:
snake_case_ = TaForConditionalGeneration(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE )
print('''Done''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
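# Hedged invocation sketch (paths are placeholders; the flags are exactly
# those declared in the argparse block above):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path ./t5_pytorch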
| 267
|
def greatest_common_divisor (x , y )-> int:
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm (x , y )-> int:
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution (n = 20 )-> int:
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
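# Worked example (editorial): the loop folds lcm over 1..n, so for n = 10 the
# running value goes 1, 2, 6, 12, 60, 60, 420, 840, 2520, 2520 and
# solution(10) == 2520; the default n = 20 gives 232792560 (Project Euler 5).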
| 267
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
snake_case_ : Tuple = "src/transformers"
snake_case_ : Union[str, Any] = "docs/source/en"
snake_case_ : str = "."
def A (__A : Union[str, Any] , __A : str , __A : Dict ) -> List[Any]:
"""simple docstring"""
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
# Find the start prompt.
UpperCAmelCase_ = 0
while not lines[start_index].startswith(__snake_case ):
start_index += 1
start_index += 1
UpperCAmelCase_ = start_index
while not lines[end_index].startswith(__snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
snake_case_ : Dict = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
snake_case_ : Any = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
snake_case_ : List[str] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so it needs to be in an else branch after the two previous regexes.
snake_case_ : List[Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : int = direct_transformers_import(TRANSFORMERS_PATH)
def A (__A : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __snake_case )
return [m.group(0 ) for m in matches]
def A (__A : Optional[Any] , __A : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ = 2 if text == '''✅''' or text == '''❌''' else len(__snake_case )
UpperCAmelCase_ = (width - text_length) // 2
UpperCAmelCase_ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def A () -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
UpperCAmelCase_ = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
UpperCAmelCase_ = collections.defaultdict(__snake_case )
UpperCAmelCase_ = collections.defaultdict(__snake_case )
UpperCAmelCase_ = collections.defaultdict(__snake_case )
UpperCAmelCase_ = collections.defaultdict(__snake_case )
UpperCAmelCase_ = collections.defaultdict(__snake_case )
# Let's lookup through all transformers object (once).
for attr_name in dir(__snake_case ):
UpperCAmelCase_ = None
if attr_name.endswith('''Tokenizer''' ):
UpperCAmelCase_ = slow_tokenizers
UpperCAmelCase_ = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
UpperCAmelCase_ = fast_tokenizers
UpperCAmelCase_ = attr_name[:-13]
elif _re_tf_models.match(__snake_case ) is not None:
UpperCAmelCase_ = tf_models
UpperCAmelCase_ = _re_tf_models.match(__snake_case ).groups()[0]
elif _re_flax_models.match(__snake_case ) is not None:
UpperCAmelCase_ = flax_models
UpperCAmelCase_ = _re_flax_models.match(__snake_case ).groups()[0]
elif _re_pt_models.match(__snake_case ) is not None:
UpperCAmelCase_ = pt_models
UpperCAmelCase_ = _re_pt_models.match(__snake_case ).groups()[0]
if lookup_dict is not None:
while len(__snake_case ) > 0:
if attr_name in model_name_to_prefix.values():
UpperCAmelCase_ = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ = ''''''.join(camel_case_split(__snake_case )[:-1] )
# Let's build that table!
UpperCAmelCase_ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
UpperCAmelCase_ = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
UpperCAmelCase_ = [len(__snake_case ) + 2 for c in columns]
UpperCAmelCase_ = max([len(__snake_case ) for name in model_names] ) + 2
# Build the table per se
UpperCAmelCase_ = '''|''' + '''|'''.join([_center_text(__snake_case , __snake_case ) for c, w in zip(__snake_case , __snake_case )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
UpperCAmelCase_ = {True: '''✅''', False: '''❌'''}
for name in model_names:
UpperCAmelCase_ = model_name_to_prefix[name]
UpperCAmelCase_ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__snake_case , __snake_case ) for l, w in zip(__snake_case , __snake_case )] ) + "|\n"
return table
def A (__A : Optional[int]=False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = _find_text_in_file(
filename=os.path.join(__snake_case , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
UpperCAmelCase_ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__snake_case , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
snake_case_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
snake_case_ : List[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
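# Hedged usage note: per the header comment, run this from the repository
# root, optionally with the flag defined above to rewrite the table in place:
#
#     python utils/check_table.py                      # check only, raise on drift
#     python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md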
| 51
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
    def __init__(self , num_attention_heads = 1_6 , attention_head_dim = 8_8 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 3_2 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ) -> Any:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ) -> str:
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
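# Editorial note (sketch): the forward pass above blends the two transformer
# branches as
#     output = enc0 * mix_ratio + enc1 * (1 - mix_ratio) + input_states
# where enc_i are the residuals (encoded_state - input_states), so a pipeline
# can re-weight the two conditioning streams at inference time, e.g.
# `model.mix_ratio = 0.7` to favor the first condition's transformer.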
| 5
| 0
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase : Optional[int] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''audio_values''', '''audio_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=[16, 16] , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=44100 , SCREAMING_SNAKE_CASE=86 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=0.0 , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : List[Any] = spectrogram_length
A : str = num_channels
A : Optional[Any] = patch_size
A : int = feature_size // self.patch_size[1]
A : int = n_fft
A : Optional[int] = sampling_rate // hop_length_to_sampling_rate
A : int = sampling_rate
A : Optional[int] = padding_value
A : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
A : Union[str, Any] = spectrogram(
SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
A : Tuple = log_spec[:, :-1]
A : Any = log_spec - 20.0
A : Dict = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
F' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : Any = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Optional[Any] = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : List[str] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : int = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , SCREAMING_SNAKE_CASE ):
A : Any = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A : List[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A : List[Any] = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
A : Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A : str = np.ones([len(SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A : str = padded_audio_features * self.padding_value
for i in range(len(SCREAMING_SNAKE_CASE ) ):
A : List[Any] = audio_features[i]
A : Tuple = feature
# return as BatchFeature
if return_attention_mask:
A : Optional[int] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
A : Union[str, Any] = {'''audio_values''': padded_audio_features}
A : int = BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
return encoded_inputs
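# Hedged usage sketch (the extractor class is named `A` in this sample and the
# keyword names below are assumptions, since the parameters are positional
# placeholders above):
#
#     import numpy as np
#     extractor = A()
#     out = extractor([np.zeros(44100, dtype=np.float32)], sampling_rate=44100)
#     # out["audio_values"]: float32 array of shape (batch, 1, max_time, feature_size)
#     # out["audio_mask"]:   1/0 mask over audio patches (when the mask is requested)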
| 369
|
'''simple docstring'''
from __future__ import annotations
from random import random
class A :
def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = value
A : Any = random()
A : Node | None = None
A : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
A : Optional[Any] = str(self.value ) + ''' '''
A : Union[str, Any] = str(self.left or '''''' )
A : Any = str(self.right or '''''' )
return value + left + right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
A, A : Any = split(root.left , snake_case__ )
return left, root
else:
A, A : Optional[int] = split(root.right , snake_case__ )
return root, right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
A : List[str] = merge(left.right , snake_case__ )
return left
else:
A : Tuple = merge(snake_case__ , right.left )
return right
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = Node(snake_case__ )
A, A : Tuple = split(snake_case__ , snake_case__ )
return merge(merge(snake_case__ , snake_case__ ) , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A, A : Dict = split(snake_case__ , value - 1 )
A, A : Any = split(snake_case__ , snake_case__ )
return merge(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
A : int = insert(snake_case__ , int(arg[1:] ) )
elif arg[0] == "-":
A : int = erase(snake_case__ , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Union[str, Any] = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
A : Optional[int] = input()
while args != "q":
A : str = interact_treap(snake_case__ , snake_case__ )
print(snake_case__ )
A : Union[str, Any] = input()
    print('''good bye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
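# Illustrative session (editorial; describes the intended behavior of the
# helpers above): entering "+1 +3 +5 +17 +19 +2 +16 +4 +0" inserts those keys,
# after which the inorder traversal prints the sorted sequence
# "0,1,2,3,4,5,16,17,19," and a subsequent "-1" erases the node holding 1.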
| 311
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Any = filter(lambda _lowerCAmelCase : p.requires_grad , model.parameters() )
lowercase__ : Union[str, Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
if metric == "rouge2":
lowercase__ : Any = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
lowercase__ : Tuple = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
lowercase__ : Union[str, Any] = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
lowercase__ : int = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
lowercase__ : Tuple = ModelCheckpoint(
dirpath=A__ , filename=A__ , monitor=f"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=A__ , verbose=A__ , )
class UpperCAmelCase_ ( pl.Callback):
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Any = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(a )
@rank_zero_only
def _UpperCAmelCase ( self , a , a , a , a=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
lowercase__ : Optional[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
lowercase__ : Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowercase__ : Dict = od / 'test_results.txt'
lowercase__ : str = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowercase__ : Any = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowercase__ : Union[str, Any] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=a )
generations_file.parent.mkdir(exist_ok=a )
with open(a , 'a+' ) as writer:
for key in sorted(a ):
if key in ["log", "progress_bar", "preds"]:
continue
lowercase__ : Dict = metrics[key]
if isinstance(a , torch.Tensor ):
lowercase__ : Optional[Any] = val.item()
lowercase__ : Dict = f"""{key}: {val:.6f}\n"""
writer.write(a )
if not save_generations:
return
if "preds" in metrics:
lowercase__ : Tuple = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(a )
@rank_zero_only
def _UpperCAmelCase ( self , a , a ) -> int:
try:
lowercase__ : int = pl_module.model.model.num_parameters()
except AttributeError:
lowercase__ : str = pl_module.model.num_parameters()
lowercase__ : Union[str, Any] = count_trainable_parameters(a )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def _UpperCAmelCase ( self , a , a ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(a , a , 'test' )
@rank_zero_only
def _UpperCAmelCase ( self , a , a ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 77
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 99
| 0
|
'''Count the inversions in an array with both a brute-force and a divide-and-conquer algorithm.'''


def count_inversions_bf(arr):
    '''Count inversions by comparing every pair, in O(n^2).'''
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    '''Count inversions with a merge-sort style divide and conquer, in O(n log n).

    Returns the sorted array together with the number of inversions.'''
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions


def _count_cross_inversions(p, q):
    '''Merge two sorted lists and count the inversions that cross the split point.'''
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
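# A quick doctest-style sanity check of the two counters defined above;
# they must agree on any input:
# >>> count_inversions_bf([3, 2, 1])
# 3
# >>> count_inversions_recursive([3, 2, 1])
# ([1, 2, 3], 3)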
| 362
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
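# Sketch of what the lazy structure buys: importing the package is cheap, and the
# heavy torch/tf backends only load on first attribute access (paths illustrative).
# >>> from transformers.models import xlnet   # fast, nothing heavy imported yet
# >>> xlnet.XLNetModel                        # first access triggers the real import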
| 179
| 0
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    '''Download a user's most recent tweets and write them to a csv file.'''
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
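# For reference, tweepy also ships a Cursor helper that performs the same
# max_id pagination under the hood; a hedged sketch, given an authenticated
# `api` object built as inside get_all_tweets above:
# for status in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#     print(status.text)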
| 17
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    '''Return the sum of the proper divisors of n (divisors excluding n itself).'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    '''Project Euler 21: return the sum of all amicable numbers below the limit.'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
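# Worked check, assuming the definitions above: 220 and 284 form the smallest
# amicable pair, so both survive the filter in solution().
# >>> sum_of_divisors(220)
# 284
# >>> sum_of_divisors(284)
# 220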
| 17
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size: int):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
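# A standalone sketch of the contract `find_executable_batch_size` enforces: the
# decorated function is retried with a halved batch size whenever a CUDA
# out-of-memory error escapes it (the OOM threshold below is made up):
#
# @find_executable_batch_size(starting_batch_size=128)
# def probe(batch_size):
#     print(f"trying batch_size={batch_size}")
#     if batch_size > 32:  # pretend anything above 32 OOMs
#         raise RuntimeError("CUDA out of memory.")
#
# probe()  # prints 128, 64, 32, then returns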
| 352
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
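# Typical invocation, assuming this script is saved as
# convert_swiftformer_original_to_hf.py and the checkpoint path is local (both hypothetical):
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth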
| 227
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_37_81_37


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''Approximate the distance between two points on Earth using Lambert's ellipsoidal formula.'''
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
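# A hedged usage sketch: San Francisco to New York, in metres. The exact value
# depends on the haversine helper, but should be on the order of 4.1e6 m.
# >>> lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)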
| 159
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''Load a saved state dict, cast every tensor to fp16 and save it back.'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
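# fire exposes `convert` directly on the command line; a typical call
# (file names are hypothetical):
# python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin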
| 159
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
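# A minimal round-trip sketch using the standard PretrainedConfig API
# (the local directory name is hypothetical):
# >>> config = MegatronBertConfig(num_hidden_layers=2)
# >>> config.save_pretrained('./megatron-tiny')   # writes config.json
# >>> MegatronBertConfig.from_pretrained('./megatron-tiny').num_hidden_layers
# 2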
| 298
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]) -> None:
    '''Compute WER/CER for the predictions and optionally dump them to log files.'''
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f'{i}' + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    '''Normalize a target transcription so it matches the vocabulary used during training.'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    '''Run inference over the requested dataset split and log the results.'''
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
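# Typical invocation (the model and dataset ids below are examples, not requirements):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs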
| 298
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def A_ ( snake_case , snake_case , snake_case , snake_case="ybelkada/segment-anything" ):
SCREAMING_SNAKE_CASE:Union[str, Any] = hf_hub_download(snake_case , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
SCREAMING_SNAKE_CASE:Dict = SamConfig()
elif "sam_vit_l" in model_name:
SCREAMING_SNAKE_CASE:Any = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
SCREAMING_SNAKE_CASE:str = SamConfig(
vision_config=snake_case , )
elif "sam_vit_h" in model_name:
SCREAMING_SNAKE_CASE:Optional[int] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
SCREAMING_SNAKE_CASE:Optional[int] = SamConfig(
vision_config=snake_case , )
SCREAMING_SNAKE_CASE:Dict = torch.load(snake_case , map_location="cpu" )
SCREAMING_SNAKE_CASE:Optional[Any] = replace_keys(snake_case )
SCREAMING_SNAKE_CASE:str = SamImageProcessor()
SCREAMING_SNAKE_CASE:List[Any] = SamProcessor(image_processor=snake_case )
SCREAMING_SNAKE_CASE:Dict = SamModel(snake_case )
hf_model.load_state_dict(snake_case )
SCREAMING_SNAKE_CASE:List[Any] = hf_model.to("cuda" )
SCREAMING_SNAKE_CASE:str = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
SCREAMING_SNAKE_CASE:List[Any] = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert("RGB" )
SCREAMING_SNAKE_CASE:List[str] = [[[400, 650]]]
SCREAMING_SNAKE_CASE:str = [[1]]
SCREAMING_SNAKE_CASE:List[Any] = processor(images=np.array(snake_case ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[Any] = hf_model(**snake_case )
SCREAMING_SNAKE_CASE:List[str] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
SCREAMING_SNAKE_CASE:str = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[str] = hf_model(**snake_case )
SCREAMING_SNAKE_CASE:Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
SCREAMING_SNAKE_CASE:List[str] = ((75, 275, 1725, 850),)
SCREAMING_SNAKE_CASE:Tuple = processor(images=np.array(snake_case ) , input_boxes=snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Any = hf_model(**snake_case )
SCREAMING_SNAKE_CASE:List[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
SCREAMING_SNAKE_CASE:List[str] = [[[400, 650], [800, 650]]]
SCREAMING_SNAKE_CASE:Optional[int] = [[1, 1]]
SCREAMING_SNAKE_CASE:Tuple = processor(
images=np.array(snake_case ) , input_points=snake_case , input_labels=snake_case , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Tuple = hf_model(**snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
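# Typical invocation, assuming this script is saved as convert_sam_to_hf.py
# (the weights are pulled from the hub repo given by --model_hub_id):
# python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base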
| 139
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Turn `tests/models/xxx/test_modeling_xxx.py` into an importable module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Keep class names instead of class objects so the mappings are easy to read and serialize."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
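# A hedged usage sketch; requires running from the root of a transformers
# source checkout so that `tests.models.*` is importable:
# >>> get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# {BertModel: [BertModelTester], BertForMaskedLM: [BertModelTester], ...}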
| 139
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 357
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
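# A quick sketch of the special-token layout produced by the two helpers above
# (the token ids are illustrative, not guaranteed for every vocab):
# >>> tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# >>> tok.build_inputs_with_special_tokens([7, 8], [9])
# [101, 7, 8, 102, 9, 102]   # [CLS] A [SEP] B [SEP]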
| 82
| 0
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 89
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the given checkpoint name."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Translate one original checkpoint key into the HF naming scheme."""
    for i in range(1, 6):
        if F"layer_{i}." in name:
            name = name.replace(F"layer_{i}.", F"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace('conv_1.', 'conv_stem.')
    if ".block." in name:
        name = name.replace('.block.', '.')
    if "exp_1x1" in name:
        name = name.replace('exp_1x1', 'expand_1x1')
    if "red_1x1" in name:
        name = name.replace('red_1x1', 'reduce_1x1')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('.local_rep.conv_3x3.', '.conv_kxk.')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('.local_rep.conv_1x1.', '.conv_1x1.')
    if ".norm." in name:
        name = name.replace('.norm.', '.normalization.')
    if ".conv." in name:
        name = name.replace('.conv.', '.convolution.')
    if ".conv_proj." in name:
        name = name.replace('.conv_proj.', '.conv_projection.')
    for i in range(0, 2):
        for j in range(0, 4):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}.", F".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}.", F".{i}.")
    if "expand_1x1" in name:
        name = name.replace('expand_1x1', 'downsampling_layer.expand_1x1')
    if "conv_3x3" in name:
        name = name.replace('conv_3x3', 'downsampling_layer.conv_3x3')
    if "reduce_1x1" in name:
        name = name.replace('reduce_1x1', 'downsampling_layer.reduce_1x1')
    for i in range(2, 5):
        if F".global_rep.{i}.weight" in name:
            name = name.replace(F".global_rep.{i}.weight", '.layernorm.weight')
        if F".global_rep.{i}.bias" in name:
            name = name.replace(F".global_rep.{i}.bias", '.layernorm.bias')
    if ".global_rep." in name:
        name = name.replace('.global_rep.', '.transformer.')
    if ".pre_norm_mha.0." in name:
        name = name.replace('.pre_norm_mha.0.', '.layernorm_before.')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('.pre_norm_mha.1.out_proj.', '.attention.output.dense.')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('.pre_norm_ffn.0.', '.layernorm_after.')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('.pre_norm_ffn.1.', '.intermediate.dense.')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('.pre_norm_ffn.4.', '.output.dense.')
    if ".transformer." in name:
        name = name.replace('.transformer.', '.transformer.layer.')
    if ".aspp_layer." in name:
        name = name.replace('.aspp_layer.', '.')
    if ".aspp_pool." in name:
        name = name.replace('.aspp_pool.', '.')
    if "seg_head." in name:
        name = name.replace('seg_head.', 'segmentation_head.')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('segmentation_head.classifier.classifier.', 'segmentation_head.classifier.')
    if "classifier.fc." in name:
        name = name.replace('classifier.fc.', 'classifier.')
    elif (not base_model) and ("segmentation_head." not in name):
        name = 'mobilevit.' + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rename every key in the original state dict, splitting the fused qkv weights."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """Fetch a test image of two cats from the COCO val set."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original MobileViT weights into the HF structure and verify the outputs."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_'):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith('deeplabv3_'):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ])
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53])
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            'mobilevit_s': 'mobilevit-small',
            'mobilevit_xs': 'mobilevit-x-small',
            'mobilevit_xxs': 'mobilevit-xx-small',
            'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
            'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
            'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization='apple')
        model.push_to_hub(model_name, organization='apple')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
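# Example invocation (hypothetical paths, shown for illustration only):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small
# The dumped folder can then be reloaded with
# `MobileViTForImageClassification.from_pretrained(pytorch_dump_folder_path)`.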
| 5
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
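# Quick usage sketch (repo and file names are placeholders): for hfh >= 0.11.0 the call
# below is equivalent to
#   hfh.hf_hub_url("user/my_dataset", "data/train.csv", repo_type="dataset", revision=None)
# and resolves to something like
#   https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv
def _demo_hf_hub_url() -> str:
    return hf_hub_url("user/my_dataset", "data/train.csv")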
| 116
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
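# Note (illustrative): outside of tests, the same offline behaviour is driven by the
# `HF_DATASETS_OFFLINE` environment variable, e.g.
#   HF_DATASETS_OFFLINE=1 python my_script.py
# which makes these network helpers raise `OfflineModeIsEnabled` just like the patches above.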
| 116
| 1
|
"""Calculate the n-th Proth number."""
import math
def proth(number: int) -> int:
    """
    Return the number-th Proth number (1-indexed).

    >>> proth(1)
    3
    >>> proth(2)
    5
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
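# Illustrative cross-check (assuming the standard definition): a Proth number is
# k * 2**n + 1 with k odd, n >= 1 and k < 2**n, so the sequence starts 3, 5, 9, 13, 17, ...
def _is_proth(candidate: int) -> bool:
    if candidate < 3:
        return False
    m, n = candidate - 1, 0
    while m % 2 == 0:
        m //= 2
        n += 1
    return n >= 1 and m < 2**n


def _check_first_proth_numbers() -> None:
    brute_force = [x for x in range(3, 100) if _is_proth(x)][:5]
    assert brute_force == [proth(i) for i in range(1, 6)]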
| 151
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
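# Illustrative follow-up (assumes the checkpoint ships an `id2label` mapping in its
# config, as the public HF ResNet checkpoints do): the integration logits can be decoded
# to a human-readable class with
#   predicted_id = int(tf.math.argmax(outputs.logits, axis=-1)[0])
#   predicted_label = model.config.id2label[predicted_id]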
| 151
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
torch.manual_seed(0 )
a :List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , )
a :Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
a :Any = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_zero=_lowerCamelCase , )
torch.manual_seed(0 )
a :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a :List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
a :List[Any] = CLIPTextModel(_lowerCamelCase )
a :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
a :Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
a :Union[str, Any] = floats_tensor((1, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :int = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Optional[Any] = torch.manual_seed(_lowerCamelCase )
else:
a :Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :Dict = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
a :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a :List[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Any = torch.manual_seed(_lowerCamelCase )
else:
a :Optional[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :Any = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
a :Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a :str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
a :Dict = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
    def test_save_load_optional_components(self):
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
a :int = self.get_dummy_components()
a :int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
a :Union[str, Any] = self.get_dummy_inputs(_lowerCamelCase )
a :Any = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
a :List[Any] = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
a :Any = self.get_dummy_inputs(_lowerCamelCase )
a :List[str] = pipe_loaded(**_lowerCamelCase )[0]
a :List[Any] = np.abs(output - output_loaded ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
    def test_mask(self):
a :int = '''cpu'''
a :Tuple = self.get_dummy_components()
a :Optional[int] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Dict = self.get_dummy_mask_inputs(_lowerCamelCase )
a :Union[str, Any] = pipe.generate_mask(**_lowerCamelCase )
a :Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
a :Tuple = np.array([0] * 9 )
a :int = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
    def test_inversion(self):
a :Union[str, Any] = '''cpu'''
a :Tuple = self.get_dummy_components()
a :Tuple = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :int = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :Tuple = pipe.invert(**_lowerCamelCase ).images
a :Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
a :Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
a :int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def test_inversion_dpm(self):
a :List[Any] = '''cpu'''
a :Any = self.get_dummy_components()
a :str = {'''beta_start''': 0.0_0085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
a :Union[str, Any] = DPMSolverMultistepScheduler(**_lowerCamelCase )
a :int = DPMSolverMultistepInverseScheduler(**_lowerCamelCase )
a :Optional[Any] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :str = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :int = pipe.invert(**_lowerCamelCase ).images
a :Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
a :Tuple = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
a :Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
        raw_image = raw_image.convert('''RGB''' ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
a :str = torch.manual_seed(0 )
a :Optional[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
a :Dict = DDIMScheduler.from_config(pipe.scheduler.config )
a :Dict = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Union[str, Any] = '''a bowl of fruit'''
a :str = '''a bowl of pears'''
a :Any = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :str = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase ).latents
a :Dict = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
a :int = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
a :Optional[int] = torch.manual_seed(0 )
a :List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
a :Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a :Dict = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Tuple = '''a bowl of fruit'''
a :List[Any] = '''a bowl of pears'''
a :Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :Optional[Any] = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase , num_inference_steps=25 , ).latents
a :Any = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
a :Optional[int] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
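# Recap of the DiffEdit call sequence exercised above (illustrative; the three public
# entry points are used exactly as in the slow tests):
#   1. pipe.generate_mask(...)  - contrasts source/target prompts to infer an edit mask.
#   2. pipe.invert(...)         - runs scheduler inversion to obtain editable image latents.
#   3. pipe(...)                - denoises those latents, editing only inside the mask.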
| 281
|
def solution(n: int = 100):
    """
    Returns the difference between the square of the sum of the first ``n`` natural
    numbers and the sum of their squares.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 281
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
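# Usage sketch (illustrative): once diffusers is installed, the registered subcommand is
# invoked from the shell as
#   diffusers-cli env
# which prints environment information via EnvironmentCommand.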
| 267
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
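# Minimal sketch of the normalization checked above (illustrative, assuming a plain
# per-array implementation): each waveform is shifted and scaled to zero mean and unit
# variance, with a small epsilon guarding against silent inputs.
def _zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + eps)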
| 267
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting a given task.
    """
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide ), start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''', end_prompt='''<!--End of the generated tip-->''', )
    new_list = get_model_list_for_task(task_guide )

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide ), '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ''' to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
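# Example of the generated snippet (hypothetical model list, shown for illustration):
# for a task guide covering BERT and DistilBERT, `get_model_list_for_task` would emit
#   [BERT](../model_doc/bert), [DistilBERT](../model_doc/distilbert)
# followed by a newline, which is spliced between the two HTML comment markers.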
| 358
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )

        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids

        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )

        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 257
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    >>> gaussian(0)
    0.3989422804014327
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
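# Quick sanity check (illustrative): the density should integrate to ~1 over a wide
# interval. Uses numpy's trapezoidal rule with a loose tolerance.
def _check_normalization() -> None:
    import numpy as np

    xs = np.linspace(-10.0, 10.0, 10_001)
    area = np.trapz([gaussian(x) for x in xs], xs)
    assert abs(area - 1.0) < 1e-5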
| 17
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname ) ->str:
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' , stem ).groups()[0]
class PetsDataset(Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        return len(self.file_names )

    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config , args ):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    image_size = config['image_size']
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCamelCase__ : Optional[Any] =args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCamelCase__ : Union[str, Any] =int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCamelCase__ : int =None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCamelCase__ : Tuple =os.path.split(snake_case_ )[-1].split('.' )[0]
accelerator.init_trackers(snake_case_ , snake_case_ )
# Grab all the image filenames
lowerCamelCase__ : List[str] =[os.path.join(args.data_dir , snake_case_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCamelCase__ : str =[extract_label(snake_case_ ) for fname in file_names]
lowerCamelCase__ : Any =list(set(snake_case_ ) )
id_to_label.sort()
lowerCamelCase__ : List[Any] ={lbl: i for i, lbl in enumerate(snake_case_ )}
# Set the seed before splitting the data.
np.random.seed(snake_case_ )
torch.manual_seed(snake_case_ )
torch.cuda.manual_seed_all(snake_case_ )
# Split our filenames between train and validation
lowerCamelCase__ : int =np.random.permutation(len(snake_case_ ) )
lowerCamelCase__ : Tuple =int(0.8 * len(snake_case_ ) )
lowerCamelCase__ : str =random_perm[:cut]
lowerCamelCase__ : Dict =random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCamelCase__ : str =Compose([RandomResizedCrop(snake_case_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCamelCase__ : Any =PetsDataset(
[file_names[i] for i in train_split] , image_transform=snake_case_ , label_to_id=snake_case_ )
# For evaluation, we use a deterministic Resize
lowerCamelCase__ : Optional[int] =Compose([Resize(snake_case_ ), ToTensor()] )
lowerCamelCase__ : Dict =PetsDataset([file_names[i] for i in eval_split] , image_transform=snake_case_ , label_to_id=snake_case_ )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[Any] =DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
lowerCamelCase__ : int =DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Dict =create_model('resnet50d' , pretrained=snake_case_ , num_classes=len(snake_case_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : str =model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCamelCase__ : Dict =False
for param in model.get_classifier().parameters():
lowerCamelCase__ : List[str] =True
# We normalize the batches of images to be a bit faster.
lowerCamelCase__ : Any =torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCamelCase__ : Dict =torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : int =torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
# Instantiate learning rate scheduler
lowerCamelCase__ : Dict =OneCycleLR(optimizer=snake_case_ , max_lr=snake_case_ , epochs=snake_case_ , steps_per_epoch=len(snake_case_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase__ : int =0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase__ : Optional[int] =0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase__ : int =os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCamelCase__ : int =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCamelCase__ : Optional[int] =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCamelCase__ : Tuple =os.path.splitext(snake_case_ )[0]
if "epoch" in training_difference:
lowerCamelCase__ : Union[str, Any] =int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCamelCase__ : Optional[int] =None
else:
lowerCamelCase__ : List[Any] =int(training_difference.replace('step_' , '' ) )
lowerCamelCase__ : int =resume_step // len(snake_case_ )
resume_step -= starting_epoch * len(snake_case_ )
# Now we train the model
for epoch in range(snake_case_ , snake_case_ ):
model.train()
if args.with_tracking:
lowerCamelCase__ : str =0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCamelCase__ : Tuple =accelerator.skip_first_batches(snake_case_ , snake_case_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCamelCase__ : str =train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : int ={k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : List[str] =(batch['image'] - mean) / std
lowerCamelCase__ : Any =model(snake_case_ )
lowerCamelCase__ : List[Any] =torch.nn.functional.cross_entropy(snake_case_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(snake_case_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(snake_case_ , snake_case_ ):
lowerCamelCase__ : int =f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCamelCase__ : List[Any] =os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
model.eval()
lowerCamelCase__ : int =0
lowerCamelCase__ : Optional[Any] =0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : Union[str, Any] ={k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : int =(batch['image'] - mean) / std
with torch.no_grad():
lowerCamelCase__ : List[str] =model(snake_case_ )
lowerCamelCase__ : Optional[int] =outputs.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCamelCase__ : Union[str, Any] =predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCamelCase__ : List[str] =accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {1_0_0 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_0_0 * eval_metric,
'train_loss': total_loss.item() / len(snake_case_ ),
'epoch': epoch,
} , step=snake_case_ , )
if checkpointing_steps == "epoch":
lowerCamelCase__ : Tuple =f"""epoch_{epoch}"""
if args.output_dir is not None:
lowerCamelCase__ : Tuple =os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
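# Hedged usage sketch (the script filename "cv_example.py" is hypothetical):
# launch with `accelerate launch`, checkpoint every 500 steps, then resume
# from the saved folder on a later run.
#
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps 500
#   accelerate launch cv_example.py --data_dir ./images --resume_from_checkpoint step_500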
| 126
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
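# Hedged usage sketch (the script filename and all paths are illustrative
# placeholders, not taken from the source):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin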
| 4
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
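# Quick check: Heap's algorithm emits each of the n! orderings exactly once,
# e.g. for a three-element list:
#
#   >>> sorted(heaps([1, 2, 3]))
#   [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]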
| 4
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
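# Minimal sketch, assuming the classes above mirror the real transformers
# DistilBERT config: `attribute_map` lets generic code read `hidden_size`
# while the config actually stores `dim`.
#
#   config = DistilBertConfig(dim=384, n_heads=6, n_layers=4)
#   assert config.hidden_size == 384 and config.num_attention_heads == 6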
| 302
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = tokenizer("""This is me""" , return_tensors="""pt""" )
__a = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**__lowercase )
__a = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowercase ):
model.save_pretrained(__lowercase )
__a = model.reverse_bettertransformer()
model.save_pretrained(__lowercase )
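# Hedged sketch of the round-trip exercised by the tests above: convert to
# BetterTransformer for faster inference, then revert before saving so the
# checkpoint stays in the canonical transformers format (the second test
# shows that saving without reverting raises). The output path is illustrative.
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()        # optimum-backed fast path
#   model = model.reverse_bettertransformer()   # back to vanilla modules
#   model.save_pretrained("./t5-checkpoint")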
| 302
| 1
|
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int) -> None:
    """Write the first ``n`` lines of each file in ``src_path`` to ``dest_path``."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
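# Usage sketch via python-fire, which maps positional CLI arguments onto the
# function parameters (the filename "minify.py" is hypothetical):
#
#   python minify.py ./full_data ./mini_data 100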
| 210
|
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
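# NAND is functionally complete; a short sketch (not part of the original
# snippet) deriving the other basic gates from nand_gate:
#
#   def not_gate(a): return nand_gate(a, a)
#   def and_gate(a, b): return not_gate(nand_gate(a, b))
#   def or_gate(a, b): return nand_gate(not_gate(a), not_gate(b))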
| 210
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_lowerCAmelCase : str = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_lowerCAmelCase : Any = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> Any:
with open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as f:
A_ : List[str] = json.loads(f.read() )
A_ : Any = collections.OrderedDict()
A_ : List[Any] = collections.OrderedDict()
A_ : Tuple = collections.OrderedDict()
with open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as f:
A_ : Any = f.readlines()
A_ : Optional[Any] = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowerCAmelCase__ ):
A_ : Any = b
A_ : Tuple = idx
for wd in b:
A_ : int = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self :Union[str, Any] , snake_case :str , snake_case :str , snake_case :Optional[Any]="<|endoftext|>" , snake_case :int="<|endoftext|>" , snake_case :str="<|startoftext|>" , snake_case :Any="<|endoftext|>" , snake_case :Dict=False , **snake_case :Tuple , ):
'''simple docstring'''
super().__init__(
unk_token=A__ , pad_token=A__ , bos_token=A__ , eos_token=A__ , do_clean_text=A__ , **A__ , )
if not os.path.isfile(A__ ):
raise ValueError(
f"Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(A__ ):
raise ValueError(
f"Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
A_ : Tuple = do_clean_text
A_ , A_ , A_ , A_ : str = load_vocab_and_emoji(A__ , A__ )
A_ : str = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return len(self.raw_vocab )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :int ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(A__ , clean=self.do_clean_text )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] ):
'''simple docstring'''
return self.vocab.get(A__ , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(A__ )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict ):
'''simple docstring'''
A_ : int = "".join(A__ ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple ):
'''simple docstring'''
A_ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] )
if len(A__ ) > self.model_max_length:
A_ : Any = input_ids[-self.model_max_length :]
return input_ids
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str , snake_case :Dict = None ):
'''simple docstring'''
A_ : int = 0
if os.path.isdir(A__ ):
A_ : Optional[Any] = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ : Optional[Any] = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
A_ : Optional[int] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
A_ : Any = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(A__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
A_ : Union[str, Any] = token_index
writer.write(",".join(A__ ) + "\n" )
index += 1
with open(A__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , A__ )
return vocab_file, emoji_file
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self :Optional[int] , snake_case :List[Any] , snake_case :List[str] , snake_case :List[str] ):
'''simple docstring'''
A_ : int = vocab # same as swe
A_ : Any = ids_to_tokens # same as bpe
A_ : Optional[int] = emoji
A_ : str = np.max([len(A__ ) for w in self.vocab.keys()] )
A_ : List[str] = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
A_ : Any = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
A_ : Tuple = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
A_ : Any = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ : int = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ : List[Any] = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
A_ : Any = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
A_ : int = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
A_ : Optional[int] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return len(self.ids_to_tokens )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :str ):
'''simple docstring'''
A_ : List[str] = self.content_repattera.sub("<URL>" , A__ )
A_ : Optional[int] = self.content_repattera.sub("<EMAIL>" , A__ )
A_ : List[str] = self.content_repattera.sub("<TEL>" , A__ )
A_ : str = self.content_repattera.sub("<DATE>" , A__ )
A_ : Optional[int] = self.content_repattera.sub("<DATE>" , A__ )
A_ : List[str] = self.content_repattera.sub("<PRICE>" , A__ )
A_ : Union[str, Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A_ : Tuple = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :str , snake_case :Union[str, Any]=False ):
'''simple docstring'''
A_ : Any = text.replace(" " , "<SP>" )
A_ : List[str] = text.replace(" " , "<SP>" )
A_ : List[Any] = text.replace("\r\n" , "<BR>" )
A_ : List[str] = text.replace("\n" , "<BR>" )
A_ : Union[str, Any] = text.replace("\r" , "<BR>" )
A_ : Tuple = text.replace("\t" , "<TAB>" )
A_ : List[str] = text.replace("—" , "ー" )
A_ : Any = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
A_ : Tuple = text.replace(A__ , A__ )
if clean:
A_ : Union[str, Any] = self.clean_text(A__ )
def check_simbol(snake_case :int ):
A_ : Any = x.encode()
if len(A__ ) == 1 and len(A__ ) == 2:
A_ : Any = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2_a1 and c <= 0xc2_bf)
or (c >= 0xc7_80 and c <= 0xc7_83)
or (c >= 0xca_b9 and c <= 0xcb_bf)
or (c >= 0xcc_80 and c <= 0xcd_a2)
):
return True
return False
def checkuae(snake_case :Any ):
A_ : str = x.encode()
if len(A__ ) == 1 and len(A__ ) == 3:
A_ : Any = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_80_80 and c <= 0xe2_b0_7f:
return True
return False
A_ : Tuple = 0
A_ : int = []
while pos < len(A__ ):
A_ : Any = min(len(A__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
A_ : Union[str, Any] = [] # (token_id, token, pos)
for e in range(A__ , A__ , -1 ):
A_ : Dict = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(A__ ) > 2:
A_ : str = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(A__ ) > 0:
# the smallest token_id is adopted
A_ , A_ , A_ : List[Any] = sorted(A__ , key=lambda snake_case : x[0] )[0]
result.append(A__ )
A_ : Union[str, Any] = e
else:
A_ : Any = pos + 1
A_ : Optional[Any] = text[pos:end]
if check_simbol(A__ ):
result.append("<KIGOU>" )
elif checkuae(A__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
A_ : Optional[int] = end
return result
def SCREAMING_SNAKE_CASE ( self :str , snake_case :List[str] , snake_case :int="\n" ):
'''simple docstring'''
A_ : List[str] = []
A_ : str = []
A_ : Union[str, Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(A__ ) > 0:
words.append(bytearray(A__ ).decode("utf-8" , errors="replace" ) )
A_ : int = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(A__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(A__ )
if len(A__ ) > 0:
words.append(bytearray(A__ ).decode("utf-8" , errors="replace" ) )
A_ : int = "".join(A__ )
return text
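# The tokenize loop above is a greedy vocabulary match that collects every
# candidate piece starting at the current position and adopts the one with
# the smallest token id. A self-contained sketch of that idea (toy vocab,
# not the actual GPT-NeoX-Japanese vocabulary or class):
def greedy_tokenize(text: str, vocab: dict) -> list:
    pos, result = 0, []
    max_len = max(len(w) for w in vocab)
    while pos < len(text):
        candidates = []  # (token_id, piece, end)
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                candidates.append((vocab[piece], piece, end))
        if candidates:
            _, piece, end = sorted(candidates)[0]  # smallest token id wins
            result.append(piece)
            pos = end
        else:
            result.append(text[pos])  # fall back to a single character
            pos += 1
    return result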
| 300
|
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with ``prec`` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * (2**exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 101
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[str] = FunnelTokenizer
_UpperCamelCase : List[str] = FunnelTokenizerFast
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Dict = True
def lowercase ( self: Any ) -> str:
"""simple docstring"""
super().setUp()
UpperCamelCase_ = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowercase ( self: List[Any] , **_SCREAMING_SNAKE_CASE: Any ) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase ( self: str , **_SCREAMING_SNAKE_CASE: str ) -> Optional[int]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = '''UNwant\u00E9d,running'''
UpperCamelCase_ = '''unwanted, running'''
return input_text, output_text
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
UpperCamelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def lowercase ( self: int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
UpperCamelCase_ = tokenizer("UNwant\u00E9d,running" )
UpperCamelCase_ = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
UpperCamelCase_ = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
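# Note: unlike BERT (which gives [CLS] token type 0), the assertions above
# pin Funnel's convention of a dedicated token type 2 for the leading [CLS],
# with 0 for the first segment and 1 for the second.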
| 357
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
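# Round-trip sanity check for the normalizer above (a sketch; the class and
# method names follow the restored code, not a verified external API):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
#   x = torch.randn(2, 4)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)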
| 328
| 0
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = AudioLDMPipeline
__lowerCamelCase = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
_lowerCAmelCase = SpeechTaHifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = ["""hey"""]
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_lowerCAmelCase = SpeechTaHifiGan(_snake_case ).to(_snake_case )
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 82
|
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m`` (extended Euclid)."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
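# Worked example: 7 * 15 = 105 = 4 * 26 + 1, so the inverse of 7 mod 26 is 15.
#
#   inv = mod_inverse(7, 26)
#   assert inv == 15 and (7 * inv) % 26 == 1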
| 184
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
snake_case_ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')
        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'
        ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
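# Hedged usage sketch, assuming this class mirrors the public transformers
# BitsAndBytesConfig API (the model name is illustrative only):
#
#   from transformers import AutoModelForCausalLM
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)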
| 181
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Any , lowercase_ :Optional[Any] , lowercase_ :int=13 , lowercase_ :Optional[Any]=7 , lowercase_ :List[str]=True , lowercase_ :Dict=True , lowercase_ :str=True , lowercase_ :Optional[Any]=True , lowercase_ :Dict=99 , lowercase_ :int=32 , lowercase_ :str=5 , lowercase_ :Dict=4 , lowercase_ :Tuple=37 , lowercase_ :Dict="gelu" , lowercase_ :List[str]=0.1 , lowercase_ :int=0.1 , lowercase_ :Any=5_12 , lowercase_ :Optional[Any]=16 , lowercase_ :Optional[int]=2 , lowercase_ :Union[str, Any]=0.02 , lowercase_ :Dict=False , lowercase_ :Tuple=True , lowercase_ :Optional[Any]="None" , lowercase_ :int=3 , lowercase_ :Tuple=4 , lowercase_ :Optional[int]=None , ) -> Tuple:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = relative_attention
UpperCAmelCase = position_biased_input
UpperCAmelCase = pos_att_type
UpperCAmelCase = scope
def UpperCAmelCase__ ( self :Any ) -> Tuple:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self :Tuple ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str ) -> List[str]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str , lowercase_ :Tuple , lowercase_ :str , lowercase_ :int , lowercase_ :Union[str, Any] , lowercase_ :List[str] , lowercase_ :Optional[int] ) -> Optional[int]:
UpperCAmelCase = DebertaVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )[0]
UpperCAmelCase = model(lowercase_ , token_type_ids=lowercase_ )[0]
UpperCAmelCase = model(lowercase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Dict , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :int ) -> Any:
UpperCAmelCase = DebertaVaForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any , lowercase_ :List[Any] , lowercase_ :Any , lowercase_ :Dict , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.num_labels
UpperCAmelCase = DebertaVaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :Dict , lowercase_ :Union[str, Any] , lowercase_ :Any , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Any ) -> List[Any]:
UpperCAmelCase = self.num_labels
UpperCAmelCase = DebertaVaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self :Any , lowercase_ :Any , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :Union[str, Any] , lowercase_ :Dict , lowercase_ :List[Any] , lowercase_ :Optional[int] ) -> Dict:
UpperCAmelCase = DebertaVaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :Any , lowercase_ :Any ) -> List[Any]:
UpperCAmelCase = DebertaVaForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[int]:
        self.model_tester = DebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def UpperCAmelCase__ ( self :Optional[Any] ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    def UpperCAmelCase__ ( self :Any ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
    @slow
    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def UpperCAmelCase__ ( self :str ) -> Tuple:
pass
    @slow
    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 181
| 1
|
def solution ( limit = 1_000_000 ) -> int:
    """Project Euler 14: return the starting number below `limit` that produces
    the longest Collatz chain, memoizing chain lengths as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , limit ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
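    # Illustrative sanity check (not part of the original snippet): the
    # well-known answer to Project Euler problem 14 for the default bound
    # of one million is 837799, so the memoized search can be verified with:
    # assert solution(1_000_000) == 837_799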
| 14
|
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def UpperCAmelCase_ ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> dict:
        """Compute the exact-match rate after applying the requested normalizations."""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
                references = np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 327
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class __lowercase ( PreTrainedTokenizerFast ):
'''simple docstring'''
a : int = VOCAB_FILES_NAMES
a : List[str] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : str = ["input_ids", "attention_mask"]
a : int = MvpTokenizer
    def __init__(self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,trim_offsets=True ,**kwargs ,) -> Optional[int]:
        '''simple docstring'''
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer ,tokenizer_component ,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' ,add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' ,trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors ,state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer ,tokenizer_component ,new_value )
    @property
    def mask_token(self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token(self ,value ) -> None:
        '''simple docstring'''
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _UpperCAmelCase (self ,*args ,**kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _UpperCAmelCase (self ,*args ,**kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args ,**kwargs )
    def _UpperCAmelCase (self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def _UpperCAmelCase (self ,token_ids_0 ,token_ids_1=None ) -> Tuple:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def _UpperCAmelCase (self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
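    # For reference, the special-token layout produced by the two methods above
    # follows the BART-style scheme this tokenizer mirrors (a descriptive note,
    # not new behavior):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s> </s> B </s>
    # and the token type ids are all zeros in both cases.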
| 217
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover ( graph : dict ) -> set:
    """Greedy approximation for the minimum vertex cover problem."""
    queue = []
    # for each node, push it and its rank onto the queue;
    # heapq implements a min-priority queue, so -1 * len(v) turns it
    # into a max-priority queue keyed on the node's degree
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 217
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
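# The pattern above defers the heavy torch/flax imports until a symbol is first
# accessed; the TYPE_CHECKING branch only exists so static analyzers see real
# names. A minimal sketch of the proxy idea using only the standard library
# (the class below is illustrative, not the actual _LazyModule implementation):
import importlib
import types
class _LazyProxySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, symbol):
        # import the defining submodule lazily, on first attribute access
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)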
| 64
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('''T''')
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
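# Illustrative use of the aliases above (the function below is a sketch, not part
# of the module): NestedDataStructureLike[T] covers a bare value, a list of
# values, or a dict of values, which is exactly what this walker handles.
def count_leaves(data: NestedDataStructureLike[int]) -> int:
    if isinstance(data, dict):
        return sum(count_leaves(value) for value in data.values())
    if isinstance(data, list):
        return sum(count_leaves(value) for value in data)
    return 1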
| 64
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
UpperCAmelCase =["note_seq"]
def __init__( self , *snake_case , **snake_case) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['note_seq'])
@classmethod
def lowerCAmelCase ( cls , *snake_case , **snake_case) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['note_seq'])
@classmethod
def lowerCAmelCase ( cls , *snake_case , **snake_case) -> Dict:
'''simple docstring'''
requires_backends(cls , ['note_seq'])
| 363
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __magic_name__ ( unittest.TestCase ):
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __magic_name__ ( FlaxModelTesterMixin ,unittest.TestCase ):
UpperCAmelCase =True
UpperCAmelCase =(
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class __magic_name__ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True)
        input_ids = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32)
        output = model(input_ids )[0]
        expected_shape = [1, 1_1, 5_0_2_6_5]
        self.assertEqual(list(output.shape) , expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
@slow
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True)
        input_ids = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32)
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
| 242
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = "ZinengTang/tvlt-base"
__magic_name__ : int = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self , **_a ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE ( self , **_a ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE ( self ):
shutil.rmtree(self.tmpdirname )
    def SCREAMING_SNAKE_CASE ( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def SCREAMING_SNAKE_CASE ( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12_000] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def SCREAMING_SNAKE_CASE ( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        image = np.ones([3, 224, 224] )
        image_dict = image_processor(image , return_tensors="np" )
        input_processor = processor(images=image , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def SCREAMING_SNAKE_CASE ( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12_000] )
        image = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=image )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def SCREAMING_SNAKE_CASE ( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 281
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : str = "▁"
snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ = BigBirdTokenizer
UpperCamelCase__ = BigBirdTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE ( self ):
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = "<s>"
__magic_name__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def SCREAMING_SNAKE_CASE ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1_004 )
def SCREAMING_SNAKE_CASE ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def SCREAMING_SNAKE_CASE ( self ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def SCREAMING_SNAKE_CASE ( self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = "Hello World!"
__magic_name__ : Dict = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
        txt = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
        ids = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def SCREAMING_SNAKE_CASE ( self ):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
__magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 281
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177
|
def pancake_sort ( arr ):
    """Sort `arr` using pancake flips and return it."""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
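    # Quick illustrative check (not run in the original snippet): each pass flips
    # the current maximum to the front and then into its final slot, so
    # pancake_sort([3, 1, 2]) returns [1, 2, 3].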
| 177
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : Optional[Any] ,transformer : TransformeraDModel ,vae : AutoencoderKL ,scheduler : KarrasDiffusionSchedulers ,idalabel : Optional[Dict[int, str]] = None ,):
        super().__init__()
        self.register_modules(transformer=transformer ,vae=vae ,scheduler=scheduler )
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(''',''' ):
                    self.labels[label.lstrip().rstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )
    def __lowerCAmelCase ( self : Any ,label : Union[str, List[str]] ):
        if not isinstance(label ,list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : int ,lowercase_ : List[int] ,lowercase_ : float = 4.0 ,lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase_ : int = 5_0 ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,):
lowerCAmelCase__ : Optional[Any] = len(lowercase_ )
lowerCAmelCase__ : Any = self.transformer.config.sample_size
lowerCAmelCase__ : int = self.transformer.config.in_channels
lowerCAmelCase__ : List[str] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) ,generator=lowercase_ ,device=self.device ,dtype=self.transformer.dtype ,)
lowerCAmelCase__ : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCAmelCase__ : List[Any] = torch.tensor(lowercase_ ,device=self.device ).reshape(-1 )
lowerCAmelCase__ : Dict = torch.tensor([1_0_0_0] * batch_size ,device=self.device )
lowerCAmelCase__ : List[Any] = torch.cat([class_labels, class_null] ,0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCAmelCase__ : List[Any] = latent_model_input[: len(lowercase_ ) // 2]
lowerCAmelCase__ : Optional[Any] = torch.cat([half, half] ,dim=0 )
lowerCAmelCase__ : List[str] = self.scheduler.scale_model_input(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Optional[int] = t
if not torch.is_tensor(lowercase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCAmelCase__ : Any = latent_model_input.device.type == '''mps'''
if isinstance(lowercase_ ,lowercase_ ):
lowerCAmelCase__ : Dict = torch.floataa if is_mps else torch.floataa
else:
lowerCAmelCase__ : List[str] = torch.intaa if is_mps else torch.intaa
lowerCAmelCase__ : List[Any] = torch.tensor([timesteps] ,dtype=lowercase_ ,device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCAmelCase__ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase__ : List[str] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCAmelCase__ : Dict = self.transformer(
lowercase_ ,timestep=lowercase_ ,class_labels=lowercase_ ).sample
# perform guidance
if guidance_scale > 1:
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCAmelCase__ ,lowerCAmelCase__ : Any = torch.split(lowercase_ ,len(lowercase_ ) // 2 ,dim=0 )
lowerCAmelCase__ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCAmelCase__ : Optional[int] = torch.cat([half_eps, half_eps] ,dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([eps, rest] ,dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCAmelCase__ ,lowerCAmelCase__ : Any = torch.split(lowercase_ ,lowercase_ ,dim=1 )
else:
lowerCAmelCase__ : int = noise_pred
# compute previous image: x_t -> x_t-1
lowerCAmelCase__ : int = self.scheduler.step(lowercase_ ,lowercase_ ,lowercase_ ).prev_sample
if guidance_scale > 1:
lowerCAmelCase__ ,lowerCAmelCase__ : int = latent_model_input.chunk(2 ,dim=0 )
else:
lowerCAmelCase__ : Optional[int] = latent_model_input
lowerCAmelCase__ : List[Any] = 1 / self.vae.config.scaling_factor * latents
lowerCAmelCase__ : List[Any] = self.vae.decode(lowercase_ ).sample
lowerCAmelCase__ : Tuple = (samples / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase__ : Tuple = samples.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ : List[str] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase_ )
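# The classifier-free guidance arithmetic used in the denoising loop above, shown
# in isolation (a sketch for clarity; `cfg_combine` is an illustrative name, not
# pipeline API):
def cfg_combine(eps_cond: torch.Tensor, eps_uncond: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # guidance_scale > 1 pushes the noise prediction toward the class-conditional
    # direction: eps = eps_uncond + s * (eps_cond - eps_uncond)
    return eps_uncond + guidance_scale * (eps_cond - eps_uncond)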
| 106
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = MvpTokenizer
UpperCamelCase__ : Tuple = MvpTokenizerFast
UpperCamelCase__ : int = True
UpperCamelCase__ : Tuple = filter_roberta_detectors
def _A ( self ):
'''simple docstring'''
super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
def _A ( self , **_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _A ( self , **_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def _A ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _A ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _A ( self ):
'''simple docstring'''
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
# Test that special tokens are reset
@require_torch
def _A ( self ):
'''simple docstring'''
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='pt' )
            # check if input_ids are returned and no labels
            self.assertIn('input_ids' , batch )
            self.assertIn('attention_mask' , batch )
            self.assertNotIn('labels' , batch )
            self.assertNotIn('decoder_attention_mask' , batch )
@require_torch
def _A ( self ):
'''simple docstring'''
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _A ( self ):
'''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1_024, 'I am a small frog'] , padding=True , truncation=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def _A ( self ):
'''simple docstring'''
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , text_target=tgt_text , return_tensors='pt' )
            input_ids = inputs['input_ids']
            labels = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 257
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowerCamelCase ( Trainer ):
'''simple docstring'''
    def __init__( self : Tuple , config=None , data_args=None , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
""" padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
def lowercase__ ( self : str , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
if self.optimizer is None:
A__ : Dict =["bias", "LayerNorm.weight"]
A__ : Optional[int] =[
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
A__ : int =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
A__ : Any =Adafactor
A__ : int ={"scale_parameter": False, "relative_step": False}
else:
A__ : Union[str, Any] =AdamW
A__ : List[Any] ={
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
A__ : Any =self.args.learning_rate
if self.sharded_ddp:
A__ : List[str] =OSS(
params=_SCREAMING_SNAKE_CASE , optim=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
A__ : Any =optimizer_cls(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
A__ : Optional[int] =self._get_lr_scheduler(_SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def lowercase__ ( self : Dict , num_training_steps ) -> str:
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
def lowercase__ ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
A__ : str =model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
A__ : Optional[Any] =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
A__ : Any =model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
A__ : List[Any] =model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
A__ : Tuple =torch.nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
A__ : str =self.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =inputs.pop("""labels""" )
A__ : List[Any] =self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return loss
def lowercase__ ( self : str , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : Dict[str, Union[torch.Tensor, Any]] , lowerCAmelCase_ : bool , lowerCAmelCase_ : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
'''simple docstring'''
A__ : List[str] =self._prepare_inputs(_SCREAMING_SNAKE_CASE )
A__ : Any ={
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
A__ : Tuple =self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
A__ : List[Any] =self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
A__ : List[str] =inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
A__ : Dict =self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : str =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
A__ : Any =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
A__ : Optional[Any] =self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
A__ : List[str] =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f" padded to `max_length`={max_length}" )
A__ : List[str] =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
A__ : Union[str, Any] =tensor
return padded_tensor
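# --- Illustrative sketch (an addition, not part of the trainer above): the
# padding step in `_pad_tensors_to_max_len`, rewritten as a standalone helper.
# The function name and example values are assumptions for illustration.
import torch
def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Allocate a (batch, max_length) tensor filled with the pad id, then copy
    # the original values into the leading positions.
    padded_tensor = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor
# e.g. pad_to_max_len(torch.tensor([[1, 2, 3]]), max_length=5, pad_token_id=0)
# -> tensor([[1, 2, 3, 0, 0]])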
| 364
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : int=99 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=5_12 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Optional[int]=None , ) -> List[str]:
'''simple docstring'''
A__ : List[str] =parent
A__ : Optional[Any] =batch_size
A__ : List[Any] =seq_length
A__ : Tuple =is_training
A__ : Optional[int] =use_input_mask
A__ : Optional[int] =use_token_type_ids
A__ : Tuple =use_labels
A__ : Dict =vocab_size
A__ : Optional[Any] =hidden_size
A__ : Tuple =num_hidden_layers
A__ : Tuple =num_attention_heads
A__ : Union[str, Any] =intermediate_size
A__ : Dict =hidden_act
A__ : List[Any] =hidden_dropout_prob
A__ : Optional[int] =attention_probs_dropout_prob
A__ : List[str] =max_position_embeddings
A__ : str =type_vocab_size
A__ : Union[str, Any] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : List[Any] =num_labels
A__ : str =num_choices
A__ : Union[str, Any] =scope
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
A__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[str] =None
if self.use_input_mask:
A__ : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
A__ : List[str] =None
A__ : int =None
A__ : List[Any] =None
if self.use_labels:
A__ : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any =ids_tensor([self.batch_size] , self.num_choices )
A__ : str =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =DistilBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : List[str] =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =DistilBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Dict =DistilBertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
A__ : List[Any] =self.num_labels
A__ : Dict =DistilBertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> int:
'''simple docstring'''
A__ : Optional[int] =self.num_labels
A__ : Union[str, Any] =DistilBertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[Any] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : int =self.num_choices
A__ : Tuple =DistilBertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : int =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Tuple =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : List[str] =config_and_inputs
A__ : str ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = True
__snake_case = True
__snake_case = True
__snake_case = True
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
A__ : Any =DistilBertModelTester(self )
A__ : Tuple =ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[int] =DistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
@require_torch_gpu
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
A__ : Any =True
A__ : Optional[Any] =model_class(config=lowerCAmelCase_ )
A__ : Dict =self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[int] =torch.jit.trace(
lowerCAmelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) )
A__ : List[Any] =torch.jit.load(os.path.join(lowerCAmelCase_ , """traced_model.pt""" ) , map_location=lowerCAmelCase_ )
loaded(inputs_dict["""input_ids"""].to(lowerCAmelCase_ ) , inputs_dict["""attention_mask"""].to(lowerCAmelCase_ ) )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A__ : Optional[Any] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A__ : int =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
A__ : Tuple =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : Optional[int] =torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) )
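# --- Illustrative sketch (an addition): the integration-test pattern used
# above, reduced to its core: compare a small slice of the model output
# against hard-coded expected values with a loose tolerance. The tensors
# below are made up, not real DistilBert activations.
import torch
output = torch.zeros(1, 11, 768)        # stand-in for model(...)[0]
expected_slice = torch.zeros(1, 3, 3)   # stand-in for the reference values
assert output.shape == torch.Size((1, 11, 768))
assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)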
| 136
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : str=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
lowerCAmelCase = nn.Parameter(lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
lowerCAmelCase = nn.Parameter(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : int ):
# set torch weights for 1-to-1 comparison
lowerCAmelCase = np.asarray(weights[0] )
lowerCAmelCase = np.asarray(weights[1] )
lowerCAmelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase ).view(-1 , lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def a_ ( lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ):
# set torch weights for 1-to-1 comparison
lowerCAmelCase = np.asarray(weights[0] )
lowerCAmelCase = np.asarray(weights[1] )
lowerCAmelCase = np.asarray(weights[2] )
lowerCAmelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase ).view(-1 , lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ):
# layernorm 1
lowerCAmelCase = weights[0][0][0]
lowerCAmelCase = np.asarray(layer_norm_a[0] )
lowerCAmelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# lsh weights + output
lowerCAmelCase = weights[0][1]
if len(lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase , torch_block.attention , lowerCamelCase )
else:
set_layer_weights_in_torch_local(lowerCamelCase , torch_block.attention , lowerCamelCase )
# intermediate weights
lowerCAmelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase ) == 4:
lowerCAmelCase = intermediate_weights[2]
# layernorm 2
lowerCAmelCase = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# intermediate dense
lowerCAmelCase = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
# intermediate out
lowerCAmelCase = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Dict ):
# reformer model
lowerCAmelCase = torch_model.reformer
# word embeds
lowerCAmelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase ) , )
if isinstance(weights[3] , lowerCamelCase ):
lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
lowerCAmelCase = nn.Parameter(torch.tensor(lowerCamelCase ) )
lowerCAmelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# output layer norm
lowerCAmelCase = np.asarray(weights[7][0] )
lowerCAmelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# output embeddings
lowerCAmelCase = np.asarray(weights[9][0] )
lowerCAmelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
def a_ ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : str ):
# Initialise PyTorch model
lowerCAmelCase = ReformerConfig.from_json_file(lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCAmelCase = ReformerModelWithLMHead(lowerCamelCase )
with open(lowerCamelCase , 'rb' ) as f:
lowerCAmelCase = pickle.load(lowerCamelCase )['weights']
set_model_weights_in_torch(lowerCamelCase , lowerCamelCase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
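# --- Illustrative sketch (an addition): the shape-checked parameter copy that
# the first helper above performs (it is called `set_param` at its call sites).
# A minimal self-contained version with a made-up layer:
import torch
from torch import nn
def set_param_sketch(torch_layer, weight, bias=None):
    # Verify shapes before overwriting, so a wrong mapping fails loudly.
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
layer = nn.Linear(4, 4)
set_param_sketch(layer, torch.zeros(4, 4), torch.zeros(4))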
| 4
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
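# --- Illustrative sketch (an addition): the wildcard matching that
# `should_ignore` above implements, as a standalone predicate with made-up
# patterns. "foo.*" matches by prefix; "a.*.b" matches when both prefix and
# suffix occur in the name; plain keys match by substring.
def matches(name: str, key: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name
assert matches("quantizer.vq.layers.0._codebook.embed", "quantizer.vq.layers.*._codebook.embed")
assert not matches("decoder.model.0.conv.conv", "encoder.*")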
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4
| 1
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCamelCase = logging.getLogger(__name__)
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
UpperCAmelCase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
super().__init__(_UpperCAmelCase )
UpperCAmelCase_ = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = threshold
def lowercase__ ( self : List[str] , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = patience
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.inference_layers_num / self.inference_instances_num
UpperCAmelCase_ = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase__ ( self : int , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Any=False , ) -> Optional[int]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCAmelCase_ = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCAmelCase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase_ = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
UpperCAmelCase_ = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase_ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = encoder_hidden_states.size()
UpperCAmelCase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase_ = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
UpperCAmelCase_ = self.invert_attention_mask(_UpperCAmelCase )
else:
UpperCAmelCase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase_ = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
UpperCAmelCase_ = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
UpperCAmelCase_ = embedding_output
if self.training:
UpperCAmelCase_ = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase_ = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
UpperCAmelCase_ = self.pooler(_UpperCAmelCase )
UpperCAmelCase_ = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase_ = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = self.pooler(encoder_outputs[0] )
UpperCAmelCase_ = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase_ = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
UpperCAmelCase_ = self.pooler(_UpperCAmelCase )
UpperCAmelCase_ = output_layers[i](_UpperCAmelCase )
if regression:
UpperCAmelCase_ = logits.detach()
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = logits
if patient_counter == self.patience:
break
UpperCAmelCase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , SCREAMING_SNAKE_CASE , )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(_UpperCAmelCase )
UpperCAmelCase_ = config.num_labels
UpperCAmelCase_ = BertModelWithPabee(_UpperCAmelCase )
UpperCAmelCase_ = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCAmelCase_ = (logits[-1],)
if labels is not None:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ = MSELoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCAmelCase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCAmelCase_ = (total_loss / total_weights,) + outputs
return outputs
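# --- Illustrative sketch (an addition): the patience-based early exit from
# the forward pass above, isolated as a standalone loop. `per_layer_logits`
# is a made-up list of per-layer classifier outputs.
import torch
def patient_early_exit(per_layer_logits, patience: int):
    patient_counter, patient_result, used_layers = 0, None, 0
    for logits in per_layer_logits:
        used_layers += 1
        labels = logits.detach().argmax(dim=1)
        # Count how many consecutive layers agree with the previous prediction.
        if patient_result is not None and torch.all(labels.eq(patient_result.detach().argmax(dim=1))):
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = logits
        if patient_counter == patience:
            break
    return patient_result, used_layers
# Identical layer outputs and patience=2 -> exit after the third layer.
logits = [torch.tensor([[0.1, 0.9]])] * 5
_, n = patient_early_exit(logits, patience=2)
assert n == 3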
| 241
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''rwkv'''
UpperCamelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , _UpperCAmelCase : int=50277 , _UpperCAmelCase : Optional[Any]=1024 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=1e-5 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=6 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = context_length
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = rescale_every
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
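# --- Illustrative sketch (an addition): the defaulting logic in the config
# __init__ above. With made-up values, attention_hidden_size falls back to
# hidden_size and intermediate_size to 4 * hidden_size when left as None.
hidden_size, attention_hidden_size, intermediate_size = 1024, None, None
attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
assert (attention_hidden_size, intermediate_size) == (1024, 4096)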
| 241
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def snake_case_( self ) -> int:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def snake_case_( self ) -> str:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def snake_case_( self ) -> List[str]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def snake_case_( self ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(A , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(A ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(A )
_SCREAMING_SNAKE_CASE = rays.view(A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def snake_case_( self , A ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(A , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(A , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(A , 1 , 3 )
+ self.x.view(A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(A , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=A )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(A , *A , 2 , 3 )
def snake_case_( self , A , A ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=A , height=A , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase ( __lowerCamelCase : int ) ->DifferentiableProjectiveCamera:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(__lowerCamelCase ), np.cos(__lowerCamelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(__lowerCamelCase ), -np.sin(__lowerCamelCase ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(__lowerCamelCase , __lowerCamelCase )
origins.append(__lowerCamelCase )
xs.append(__lowerCamelCase )
ys.append(__lowerCamelCase )
zs.append(__lowerCamelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowerCamelCase , axis=0 ) ).float() , width=__lowerCamelCase , height=__lowerCamelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowerCamelCase )) , )
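# --- Illustrative sketch (an addition): the pixel-coordinate enumeration that
# `get_image_coords` above performs, with a made-up 3x2 resolution. Pixels are
# listed row-major as (x, y) pairs.
import torch
height, width = 2, 3
pixel_indices = torch.arange(height * width)
coords = torch.stack(
    [pixel_indices % width, torch.div(pixel_indices, width, rounding_mode="trunc")], axis=1
)
assert coords.tolist() == [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]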
| 58
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowercase__ : Any = logging.get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[str] = None
@experimental
def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int:
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase )
_UpperCamelCase = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowercase ):
_UpperCamelCase = len(lowercase ) // num_proc
_UpperCamelCase = len(lowercase ) % num_proc
_UpperCamelCase = div * index + min(lowercase, lowercase )
_UpperCamelCase = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(lowercase )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
_UpperCamelCase , _UpperCamelCase = None, None
if not disable_tqdm:
_UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock
with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool:
_UpperCamelCase = pool.map(lowercase, lowercase )
logger.info(F"""Finished {num_proc} processes""" )
_UpperCamelCase = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(lowercase )} objects""" )
return mapped
def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ):
return joblib.Parallel()(
joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a__ ( lowercase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_UpperCamelCase = None
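# --- Illustrative sketch (an addition): the contiguous split arithmetic used
# in `_map_with_multiprocessing_pool` above. Each of `num_proc` slices gets
# len(iterable) // num_proc items, and the first len(iterable) % num_proc
# slices get one extra item.
def contiguous_splits(iterable, num_proc):
    div, mod = divmod(len(iterable), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(iterable[start:end])
    return splits
assert contiguous_splits(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]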
| 324
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase ( a ):
lowercase__ : List[str] = ["""image_processor""", """tokenizer"""]
lowercase__ : Any = """OwlViTImageProcessor"""
lowercase__ : List[str] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[str] , _UpperCamelCase : List[str]=None , _UpperCamelCase : int=None , **_UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
SCREAMING_SNAKE_CASE = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : Union[str, Any] , _UpperCamelCase : Dict=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : str=None , _UpperCamelCase : Optional[int]="max_length" , _UpperCamelCase : Union[str, Any]="np" , **_UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(lowercase_ , lowercase_ ) or (isinstance(lowercase_ , lowercase_ ) and not isinstance(text[0] , lowercase_ )):
SCREAMING_SNAKE_CASE = [self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )]
elif isinstance(lowercase_ , lowercase_ ) and isinstance(text[0] , lowercase_ ):
SCREAMING_SNAKE_CASE = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE = max([len(lowercase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowercase_ ) != max_num_queries:
SCREAMING_SNAKE_CASE = t + [" "] * (max_num_queries - len(lowercase_ ))
SCREAMING_SNAKE_CASE = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
encodings.append(lowercase_ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
SCREAMING_SNAKE_CASE = BatchEncoding()
SCREAMING_SNAKE_CASE = input_ids
SCREAMING_SNAKE_CASE = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE = BatchEncoding()
SCREAMING_SNAKE_CASE = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ ).pixel_values
SCREAMING_SNAKE_CASE = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
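
# A minimal usage sketch for a processor like the one above, assuming the standard
# `transformers` OwlViTProcessor API; the checkpoint name, placeholder image, and
# query strings are illustrative only. Passing a nested list of queries exercises
# the per-sample padding loop shown above.
#
# from PIL import Image
# from transformers import OwlViTProcessor
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# image = Image.new("RGB", (768, 768))  # blank placeholder image
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
# print(inputs.input_ids.shape, inputs.pixel_values.shape)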
| 369
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
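
# A short sketch of exercising the configuration above, assuming the standard
# `transformers` UniSpeechSatConfig; the printed value follows directly from the
# default conv strides.
#
# from transformers import UniSpeechSatConfig
#
# config = UniSpeechSatConfig()
# # The feature extractor downsamples raw audio by the product of the conv strides:
# # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per output frame.
# print(config.inputs_to_logits_ratio)  # 320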
| 206
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
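
# The `_LazyModule` indirection above defers heavy framework imports until a
# symbol is first accessed. Below is a minimal self-contained sketch of the same
# idea using PEP 562 module-level __getattr__; the names are illustrative, not
# transformers API.

import importlib

_lazy_imports = {"json": ["dumps"], "math": ["sqrt"]}


def __getattr__(name):
    # resolve the owning module lazily on first attribute access
    for module_name, symbols in _lazy_imports.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")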
| 210
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
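
# The download loop above is standard max_id pagination: every request asks only
# for items strictly older than the oldest one already seen, so nothing is
# fetched twice. The same pattern as a generic generator; the `fetch` callable
# stands in for `api.user_timeline` and is an assumption, not tweepy API.

def paginate(fetch, count=200):
    batch = fetch(count=count)
    while batch:
        yield from batch
        # request only items strictly older than the last one seen
        batch = fetch(count=count, max_id=batch[-1].id - 1)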
| 210
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotModelIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
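
# The cache test above reduces to a single invariant: decoding the full sequence
# in one pass and decoding incrementally with past_key_values must produce
# numerically identical logits for the new positions. A framework-agnostic sketch
# of that check with NumPy; shapes and tolerance mirror the test but are illustrative.

import numpy as np


def caches_agree(full_logits, incremental_logits, rtol=1e-3):
    # compare only the positions produced in the incremental step
    n = incremental_logits.shape[1]
    return np.allclose(full_logits[:, -n:, :], incremental_logits, rtol=rtol)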
| 251
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
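
# A sketch of how the generated module above is typically consumed: parse a
# serialized SentencePiece model file and inspect its trainer settings. The
# message and field names follow the proto definition embedded above; the file
# path is illustrative.
#
# from sentencepiece_model_pb2 import ModelProto
#
# model = ModelProto()
# with open("spiece.model", "rb") as f:
#     model.ParseFromString(f.read())
# print(model.trainer_spec.vocab_size, model.trainer_spec.model_type)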
| 157
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
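
# A condensed sketch of the translation round trip the tests above exercise,
# assuming the standard `transformers` MBartTokenizer API; the checkpoint is the
# one used in the test class.
#
# from transformers import MBartTokenizer
#
# tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tok(
#     " UN Chief Says There Is No Military Solution in Syria",
#     text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
#     return_tensors="pt",
# )
# # source ids end with [eos, en_XX]; labels end with [eos, ro_RO]
# print(batch.input_ids[0, -2:].tolist(), batch.labels[0, -2:].tolist())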
| 246
| 0
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
    import jax
    import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
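
# The numerical heart of both tests above in isolation: bfloat16 Flax outputs are
# compared against fp16 torch reference slices with an absolute tolerance of 1e-2.
# A tiny self-contained demonstration of that comparison; the arrays are illustrative.

import jax.numpy as jnp

reference = jnp.array([0.1514, 0.0807, 0.1624])
candidate = jnp.array([0.1511, 0.0809, 0.1631])
assert jnp.allclose(reference, candidate, atol=1e-2)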
| 369
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generates prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
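
# Worked example: the primes below 10 are 2, 3, 5, and 7, so summing with an
# upper bound of 10 yields 17; the 2_000_000 default above is the Project Euler
# problem 10 input. A quick check against the definitions above:
#
# assert sum(takewhile(lambda x: x < 10, prime_generator())) == 17
# assert solution(10) == 17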
| 259
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }

        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 227
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
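
# A sketch of composing the three configurations above, assuming the standard
# `transformers` Pix2Struct config classes and their `from_text_vision_configs`
# helper:
#
# from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
#
# config = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
# print(config.text_config.hidden_size, config.vision_config.hidden_size)  # 768 768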
| 144
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase ,numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(_lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
    def test_call_pytorch(self):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase ,torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(_lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
    def test_call_segmentation_maps(self):
'''simple docstring'''
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCamelCase ,torchify=_lowerCamelCase )
__lowercase = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,maps[0] ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
__lowercase = image_processing(_lowerCamelCase ,_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
__lowercase = prepare_semantic_single_inputs()
__lowercase = image_processing(_lowerCamelCase ,_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
__lowercase = prepare_semantic_batch_inputs()
__lowercase = image_processing(_lowerCamelCase ,_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(
encoding['''labels'''].shape ,(
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
self.assertEqual(encoding['''labels'''].dtype ,torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
    def test_reduce_labels(self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)
        # Once the background is remapped to the ignore index, values may reach 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
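# A minimal sketch of the label-reduction semantics exercised above (the real
# logic lives inside the image processor; the array names here are
# illustrative): with reduce_labels, background id 0 is remapped to the ignore
# index 255 and every other class id shifts down by one.
#
#   import numpy as np
#   label = np.array([[0, 1], [2, 150]], dtype=np.int64)
#   reduced = label - 1
#   reduced[reduced == -1] = 255
#   # reduced -> [[255, 0], [1, 149]]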
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Load a checkpoint stored on the local filesystem into the bart.large.cnn hub interface."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
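    # Example invocation (illustrative; the script filename and dump folder are hypothetical):
    #   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
    #       bart.large ./bart-large-converted --hf_config facebook/bart-large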
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : List[str] =fairseq_model.state_dict()
lowerCamelCase__ : Optional[int] =hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__ : List[Any] =False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase__ : Optional[Any] =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__ : int ='hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
lowerCamelCase__ : Tuple =True
if "*" in mapped_key:
lowerCamelCase__ : str =name.split(snake_case_ )[0].split('.' )[-2]
lowerCamelCase__ : str =mapped_key.replace('*' , snake_case_ )
if "weight_g" in name:
lowerCamelCase__ : Any ='weight_g'
elif "weight_v" in name:
lowerCamelCase__ : Any ='weight_v'
elif "weight" in name:
lowerCamelCase__ : List[Any] ='weight'
elif "bias" in name:
lowerCamelCase__ : Optional[int] ='bias'
else:
lowerCamelCase__ : List[str] =None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
lowerCamelCase__ : Tuple =full_name.split('conv_layers.' )[-1]
lowerCamelCase__ : str =name.split('.' )
lowerCamelCase__ : List[str] =int(items[0] )
lowerCamelCase__ : Optional[int] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__ : int =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__ : Union[str, Any] =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__ : List[Any] =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__ : Tuple =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if config_path is not None:
lowerCamelCase__ : Optional[Any] =HubertConfig.from_pretrained(snake_case_ )
else:
lowerCamelCase__ : int =HubertConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__ : Tuple =Dictionary.load(snake_case_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__ : Union[str, Any] =target_dict.pad_index
lowerCamelCase__ : Dict =target_dict.bos_index
lowerCamelCase__ : Any =target_dict.eos_index
lowerCamelCase__ : Optional[int] =len(target_dict.symbols )
lowerCamelCase__ : Any =os.path.join(snake_case_ , 'vocab.json' )
if not os.path.isdir(snake_case_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case_ ) )
return
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with open(snake_case_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , snake_case_ )
lowerCamelCase__ : Tuple =WavaVecaCTCTokenizer(
snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case_ , )
lowerCamelCase__ : List[Any] =True if config.feat_extract_norm == 'layer' else False
lowerCamelCase__ : int =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
lowerCamelCase__ : List[str] =WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
processor.save_pretrained(snake_case_ )
lowerCamelCase__ : Optional[Any] =HubertForCTC(snake_case_ )
else:
lowerCamelCase__ : str =HubertModel(snake_case_ )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__ : int =model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
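    # Example invocation (illustrative; every path here is hypothetical):
    #   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./hubert_base_ls960.pt \
    #       --pytorch_dump_folder_path ./hubert-base-converted \
    #       --not_finetuned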
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :"Conversation" ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) + [self.eos_token_id] )
if len(lowerCamelCase_ ) > self.model_max_length:
lowerCamelCase__ : List[str] =input_ids[-self.model_max_length :]
return input_ids
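# Minimal usage sketch for the fast tokenizer above (assumes the
# bigscience/bloom-560m checkpoint from the vocab map is reachable on the Hub):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   input_ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(input_ids)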
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_= BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_= self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase_= self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase_= BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= image_processor(__UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase_= processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= processor(text=__UpperCAmelCase )
UpperCAmelCase_= tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_= processor.batch_decode(__UpperCAmelCase )
UpperCAmelCase_= tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
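# Minimal sketch of the processor contract exercised by these tests
# (tokenizer/image_processor are any compatible instances, as in setUp above):
#
#   processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="lower newer", images=image, return_tensors="np")
#   # inputs has keys: "pixel_values", "input_ids", "attention_mask"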
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Wrap `fn` so that calling it first emits an experimental-API warning."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future."""),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
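# Usage sketch (the decorated function is hypothetical):
#
#   @experimental
#   def new_feature():
#       return 42
#
#   new_feature()  # emits a UserWarning, then returns 42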
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
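# Worked example: generate_pascal_triangle(4) and
# generate_pascal_triangle_optimized(4) both return
#   [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]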
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum over any k consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    # Initialize both the running sum and the best sum to the first window's sum.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
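# Worked example: max_sum_in_array([1, 4, 2, 10, 2], k=2) == 12,
# from the window [2, 10] (or equivalently [10, 2]).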
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=32 , a__=2 , a__=1 , a__=0 , a__=0.0_2 , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = eos_token_id
_lowerCAmelCase : Any = pad_token_id
_lowerCAmelCase : Optional[Any] = bos_token_id
_lowerCAmelCase : Tuple = initializer_range
def __A ( self ):
_lowerCAmelCase : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowerCAmelCase : Dict = shift_tokens_right(a__ , 1 , 2 )
_lowerCAmelCase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=a__ , )
_lowerCAmelCase : List[str] = prepare_blenderbot_inputs_dict(a__ , a__ , a__ )
return config, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = 20
_lowerCAmelCase : Any = model_class_name(a__ )
_lowerCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , a__ , a__ )
_lowerCAmelCase : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , a__ , decoder_attention_mask=a__ , past_key_values=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , a__ , decoder_attention_mask=a__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a__ , )
_lowerCAmelCase : Union[str, Any] = model.decode(a__ , a__ )
_lowerCAmelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = 20
_lowerCAmelCase : Any = model_class_name(a__ )
_lowerCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , a__ , a__ )
_lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , a__ , decoder_attention_mask=a__ , past_key_values=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : Tuple = model.decode(
decoder_input_ids[:, -1:] , a__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : str = model.decode(a__ , a__ , decoder_attention_mask=a__ )
_lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
@require_flax
class __A ( unittest.TestCase ):
    vocab_size = 99
def __A ( self ):
_lowerCAmelCase : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_lowerCAmelCase : List[str] = input_ids.shape[0]
_lowerCAmelCase : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = self._get_config_and_data()
_lowerCAmelCase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(a__ )
_lowerCAmelCase : Optional[Any] = lm_model(input_ids=a__ )
_lowerCAmelCase : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , a__ )
def __A ( self ):
_lowerCAmelCase : Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_lowerCAmelCase : Dict = FlaxBlenderbotSmallForConditionalGeneration(a__ )
_lowerCAmelCase : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_lowerCAmelCase : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_lowerCAmelCase : List[Any] = lm_model(input_ids=a__ , decoder_input_ids=a__ )
_lowerCAmelCase : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , a__ )
def __A ( self ):
_lowerCAmelCase : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_lowerCAmelCase : Dict = shift_tokens_right(a__ , 1 , 2 )
_lowerCAmelCase : Tuple = np.equal(a__ , 1 ).astype(np.floataa ).sum()
_lowerCAmelCase : Optional[int] = np.equal(a__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(a__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __A ( self ):
_lowerCAmelCase : Optional[Any] = FlaxBlenderbotSmallModelTester(self )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : Optional[Any] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Dict = model_class(a__ )
@jax.jit
def encode_jitted(a__ , a__=None , **a__ ):
return model.encode(input_ids=a__ , attention_mask=a__ )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Optional[Any] = encode_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : Optional[Any] = encode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[Any] = model_class(a__ )
_lowerCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_lowerCAmelCase : Optional[Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(a__ , a__ , a__ ):
return model.decode(
decoder_input_ids=a__ , decoder_attention_mask=a__ , encoder_outputs=a__ , )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Union[str, Any] = decode_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : List[Any] = decode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCAmelCase : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
_lowerCAmelCase : Optional[int] = model(a__ )
self.assertIsNotNone(a__ )
"""simple docstring"""
import math
def main() -> None:
    message = input("""Enter message: """ )
    key = int(input(f"Enter key [2-{len(message) - 1}]: " ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}" )


def encrypt_message(key: int, message: str) -> str:
    """Encrypt `message` with a columnar transposition cipher of width `key`."""
    cipher_text = [""""""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Decrypt a columnar transposition cipher of width `key`."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""""""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
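# Worked example (key=4):
#   encrypt_message(4, "Hello World") == "Hore llWdlo"
#   decrypt_message(4, "Hore llWdlo") == "Hello World"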
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
SCREAMING_SNAKE_CASE : Tuple = full_name.split("/")
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}")
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}")
continue
if name[0] == "model":
# ignore initial 'model'
SCREAMING_SNAKE_CASE : Optional[Any] = name[1:]
# figure out how many levels deep the name is
SCREAMING_SNAKE_CASE : Dict = 0
for _name in name:
if _name.startswith("layer_with_weights"):
depth += 1
else:
break
layer_depth.append(_a)
# read data
SCREAMING_SNAKE_CASE : Optional[int] = tf.train.load_variable(_a , _a)
names.append("/".join(_a))
arrays.append(_a)
logger.info(f"Read a total of {len(_a):,} layers")
# Sanity check
if len(set(_a)) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(_a))})")
SCREAMING_SNAKE_CASE : int = list(set(_a))[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads.")
# convert layers
logger.info("Converting weights...")
for full_name, array in zip(_a , _a):
SCREAMING_SNAKE_CASE : int = full_name.split("/")
SCREAMING_SNAKE_CASE : List[str] = model
SCREAMING_SNAKE_CASE : List[str] = []
for i, m_name in enumerate(_a):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights"):
SCREAMING_SNAKE_CASE : List[str] = int(m_name.split("-")[-1])
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"])
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , "embeddings")
SCREAMING_SNAKE_CASE : Any = getattr(_a , "LayerNorm")
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4)])
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "encoder")
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "layer")
SCREAMING_SNAKE_CASE : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"])
SCREAMING_SNAKE_CASE : Dict = getattr(_a , "pooler")
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "dense")
elif m_name == "embeddings":
trace.append("embeddings")
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_a , "embeddings")
if layer_num == 0:
trace.append("word_embeddings")
SCREAMING_SNAKE_CASE : str = getattr(_a , "word_embeddings")
elif layer_num == 1:
trace.append("position_embeddings")
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "position_embeddings")
elif layer_num == 2:
trace.append("token_type_embeddings")
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "token_type_embeddings")
else:
raise ValueError(f"Unknown embedding layer with name {full_name}")
trace.append("weight")
SCREAMING_SNAKE_CASE : str = getattr(_a , "weight")
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"])
SCREAMING_SNAKE_CASE : List[str] = getattr(_a , "attention")
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "self")
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"])
SCREAMING_SNAKE_CASE : str = getattr(_a , "attention")
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_a , "output")
SCREAMING_SNAKE_CASE : Tuple = getattr(_a , "LayerNorm")
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"])
SCREAMING_SNAKE_CASE : int = getattr(_a , "attention")
SCREAMING_SNAKE_CASE : List[str] = getattr(_a , "output")
SCREAMING_SNAKE_CASE : int = getattr(_a , "dense")
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"])
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "output")
SCREAMING_SNAKE_CASE : str = getattr(_a , "dense")
elif m_name == "_output_layer_norm":
            # output layer norm
trace.extend(["output", "LayerNorm"])
SCREAMING_SNAKE_CASE : Dict = getattr(_a , "output")
SCREAMING_SNAKE_CASE : List[str] = getattr(_a , "LayerNorm")
elif m_name == "_key_dense":
# attention key
trace.append("key")
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "key")
elif m_name == "_query_dense":
# attention query
trace.append("query")
SCREAMING_SNAKE_CASE : int = getattr(_a , "query")
elif m_name == "_value_dense":
# attention value
trace.append("value")
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_a , "value")
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"])
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_a , "intermediate")
SCREAMING_SNAKE_CASE : Any = getattr(_a , "dense")
elif m_name == "_output_layer_norm":
            # output layer norm (unreachable: the identical condition is already handled by the branch above)
trace.append("output")
SCREAMING_SNAKE_CASE : Any = getattr(_a , "output")
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias")
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "bias")
elif m_name in ["kernel", "gamma"]:
trace.append("weight")
SCREAMING_SNAKE_CASE : Optional[int] = getattr(_a , "weight")
else:
logger.warning(f"Ignored {m_name}")
# for certain layers reshape is necessary
SCREAMING_SNAKE_CASE : List[str] = ".".join(_a)
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _a) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = array.reshape(pointer.data.shape)
if "kernel" in full_name:
SCREAMING_SNAKE_CASE : int = array.transpose()
if pointer.shape == array.shape:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(_a)
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}")
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
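# A minimal usage sketch: the converter can also be called programmatically instead of via
# the CLI above. All three paths are hypothetical placeholders, not files shipped with
# this script.
def _demo_conversion():
    convert_tfa_checkpoint_to_pytorch(
        "checkpoints/tf2_bert/bert_model.ckpt",  # TensorFlow 2.x checkpoint (hypothetical path)
        "configs/bert_config.json",  # BERT config JSON (hypothetical path)
        "out/pytorch_model.bin",  # output path for the PyTorch weights
    )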
| 76
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
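# A short sketch of what the lazy module buys us: importing the package is cheap, and the
# heavy submodules are only imported on first attribute access. The package path below is
# an assumption about where this __init__.py lives.
def _demo_lazy_import():
    import transformers.models.plbart as plbart  # assumed package path

    # This attribute access is what triggers the real import of configuration_plbart.
    return plbart.PLBartConfig()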
| 136
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
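# A small sanity-check sketch for the config above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), one logit frame corresponds to 5 * 2**6 = 320 input samples.
def _demo_unispeech_sat_config():
    config = UniSpeechSatConfig()
    assert config.inputs_to_logits_ratio == 320
    return config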
| 329
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed (DDP/DataParallel/DeepSpeed/compile) containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save data to disk; use in place of `torch.save()` so only the main process (or XLA on TPU) writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased) for the duration of the block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object: its qualified name, plain name, or string form."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost` (defaults to the torch.distributed port, 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 329
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
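# A sketch of how such counts are typically consumed downstream: XLM/DistilBERT-style MLM
# masking samples tokens with probability proportional to count**-smoothing (smoothing of
# about 0.7), so rare tokens get masked more often. This helper is illustrative and not
# part of the script above.
def smooth_token_probs(counts, smoothing=0.7):
    import numpy as np

    freqs = np.maximum(np.asarray(counts, dtype=np.float64), 1.0)
    probs = freqs**-smoothing
    return probs / probs.sum()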
| 65
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
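# A minimal inference sketch with an already-converted checkpoint. The model id below is
# the public hub checkpoint this script targets; a freshly converted local folder would
# work the same way via from_pretrained(<folder>).
def _demo_trocr_inference():
    processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
    model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
    pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]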
| 300
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
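# A short sketch exercising the backbone-related fields above; "stage1".."stage4" come
# from the default depths of length 4.
def _demo_nat_config():
    config = NatConfig(out_features=["stage2", "stage4"])
    # hidden_size is the channel dim after the last stage: 64 * 2**3 == 512
    return config.hidden_size, config.out_features, config.out_indices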
| 371
|
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : Any = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : List[Any] = input_str.replace(''' ''' ,'''''')
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCamelCase_) == 26
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase__ : Union[str, Any] = True
elif char.isupper():
lowerCAmelCase__ : str = True
return all(lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def lowerCAmelCase__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCAmelCase__ : Optional[Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_faster()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_fastest()''' ,setup=lowerCamelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
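# A quick illustrative check: all three variants agree on a perfect pangram and on a
# near-miss (the second sentence is missing "z").
def _demo_pangram_checks():
    yes = "Waltz, bad nymph, for quick jigs vex"
    no = "The quick brown fox jumps over the lay dog"
    return (
        is_pangram(yes) and is_pangram_faster(yes) and is_pangram_fastest(yes),  # True
        is_pangram(no) or is_pangram_faster(no) or is_pangram_fastest(no),  # False
    )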
| 94
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
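# A usage sketch: this formatter is what `datasets` dispatches to when a dataset is put
# in "jax" format, so columns come back as jax arrays.
def _demo_jax_format():
    from datasets import Dataset  # assumes the `datasets` package is installed

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
    return ds[0]["x"]  # a jax.Array of shape (2,)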
| 67
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
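# A small round-trip sketch with the public checkpoint named above. SentencePiece ids are
# shifted by `offset` (103 by default) to make room for the reserved special tokens.
def _demo_pegasus_tokenizer():
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("To ensure a smooth flow of bank resolutions.").input_ids
    assert ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends </s>
    return tokenizer.decode(ids, skip_special_tokens=True)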
| 295
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 95
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _lowercase (self : List[str]) -> List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = True
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : int = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(_A , _A))
__snake_case : Union[str, Any] = outputs.attentions
__snake_case : int = sum(self.model_tester.depths)
self.assertEqual(len(_A) , _A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : int = True
__snake_case : Union[str, Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(_A , _A))
__snake_case : Optional[int] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Optional[int] = (self.model_tester.image_size // 4) ** 2
__snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case : int = (self.model_tester.image_size // 32) ** 2
__snake_case : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case : int = len(_A)
# Check attention is always last and order is fine
__snake_case : Any = True
__snake_case : Tuple = True
__snake_case : Optional[Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(_A , _A))
self.assertEqual(out_len + 1 , len(_A))
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Any = (self.model_tester.image_size // 4) ** 2
__snake_case : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowercase (self : str) -> List[str]:
def check_hidden_states_output(_A : Union[str, Any] , _A : List[str] , _A : Tuple):
__snake_case : Tuple = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(_A , _A))
__snake_case : List[str] = outputs.hidden_states
__snake_case : Tuple = self.model_tester.num_encoder_blocks
self.assertEqual(len(_A) , _A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Any = True
check_hidden_states_output(_A , _A , _A)
def _lowercase (self : Optional[int]) -> int:
if not self.model_tester.is_training:
return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = True
for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
continue
__snake_case : Tuple = model_class(_A)
model.to(_A)
model.train()
__snake_case : str = self._prepare_for_class(_A , _A , return_labels=_A)
__snake_case : Dict = model(**_A).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowercase (self : Tuple) -> Dict:
pass
@slow
def _lowercase (self : Any) -> List[str]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = SegformerModel.from_pretrained(_A)
self.assertIsNotNone(_A)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
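# To go from one of these checkpoints to per-pixel class labels outside the
# test harness, the minimal sketch below mirrors the calls exercised above.
# The checkpoint name comes from the tests; "cats.png" is a placeholder path.
from PIL import Image
import torch
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

image = Image.open("cats.png")  # placeholder: use your own image
processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the logits back to the input resolution and take the per-pixel argmax.
segmentation = processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) tensor of class indices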
"""CTRL model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
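# A quick usage sketch: the attribute_map above lets the generic configuration
# names resolve to CTRL's native ones (values follow from the defaults above).
from transformers import CTRLConfig

config = CTRLConfig()
assert config.hidden_size == config.n_embd == 1280
assert config.num_hidden_layers == config.n_layer == 48
print(config.max_position_embeddings)  # 256, aliased to n_positions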
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
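# Quick checks: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26; the default
# power of 1000 gives the Project Euler problem 16 answer.
assert solution(15) == 26
assert solution(1000) == 1366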
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort the first ``n`` elements of ``collection`` in place."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble ``collection[index - 1]`` rightwards until the pair is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
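# A quick non-interactive sanity check of the sort above:
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 4, 5]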
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
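# A quick sanity check of the derived attribute above: with the default
# num_block_repeats of [1, 2, 2, 3, 3, 4, 1], sum(...) * 4 == 64.
from transformers import EfficientNetConfig

config = EfficientNetConfig()
assert config.num_hidden_layers == 64
assert len(config.kernel_sizes) == len(config.strides) == 7  # one entry per block group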
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
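# A usage sketch for encode_example: one (language, translation) pair per row,
# sorted by language code, with multi-reference entries split out.
feature = TranslationVariableLanguages(languages=["en", "fr"])
encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
assert encoded == {
    "language": ("en", "fr", "fr"),
    "translation": ("the cat", "la chatte", "le chat"),
}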
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
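# The try/except blocks above only register submodule names; nothing heavy is
# imported until an attribute is first accessed. A minimal, self-contained
# sketch of that pattern follows -- this LazyModule class is a hypothetical
# illustration, not the actual transformers._LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every public attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. on first access:
        # import the owning submodule lazily, then delegate to it.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)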
def match_pattern(input_string: str, pattern: str) -> bool:
    """Match ``input_string`` against ``pattern``, where ``.`` matches any
    single character and ``*`` matches zero or more of the preceding element."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
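# A few quick checks of the matcher's semantics (the whole string must match):
assert match_pattern("aab", "c*a*b")    # "c*" matches zero chars, "a*" matches "aa"
assert match_pattern("dabc", ".*abc")   # ".*" absorbs the leading "d"
assert not match_pattern("aaab", "aa")  # a prefix match is not enough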
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the maze and print the path from (0, 0) to (size - 1, size - 1)."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search helper that marks the discovered path in ``solutions``."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
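    # Example: 0 marks an open cell and 1 a blocked one; the printed solution
    # marks the discovered path with 1s.
    maze = [
        [0, 1],
        [0, 0],
    ]
    solve_maze(maze)
    # [1, 0]
    # [1, 1]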